--------------------------------------------------------------------------------
I1 cache:         65536 B, 64 B, 4-way associative
D1 cache:         32768 B, 64 B, 8-way associative
LL cache:         67108864 B, 64 B, 64-way associative
Command:          /usr/home/liquid/.rustup/toolchains/w-profiling/bin/rustc --crate-name primitive_types --edition=2018 src/lib.rs --error-format=json --json=diagnostic-rendered-ansi,artifacts,future-incompat --crate-type lib --emit=dep-info,metadata,link -C opt-level=3 -C embed-bitcode=no --cfg feature="default" --cfg feature="impl-codec" --cfg feature="std" -C metadata=4aee75345ebbe334 -C extra-filename=-4aee75345ebbe334 --out-dir /usr/home/liquid/tmp/.tmpPNfnss/target/release/deps -L dependency=/usr/home/liquid/tmp/.tmpPNfnss/target/release/deps --extern fixed_hash=/usr/home/liquid/tmp/.tmpPNfnss/target/release/deps/libfixed_hash-3d92b355520ac12a.rmeta --extern impl_codec=/usr/home/liquid/tmp/.tmpPNfnss/target/release/deps/libimpl_codec-22fe37bbfb79d61b.rmeta --extern uint=/usr/home/liquid/tmp/.tmpPNfnss/target/release/deps/libuint-4bf069ab53e21470.rmeta -Adeprecated -Aunknown-lints -Zincremental-verify-ich
Data file:        results/cgout-w-profiling-primitive-types-0.10.1-Opt-Full
Events recorded:  Ir
Events shown:     Ir
Event sort order: Ir
Thresholds:       0.1
Include dirs:
User annotated:
Auto-annotation:  on
--------------------------------------------------------------------------------
Ir
--------------------------------------------------------------------------------
24,042,947,465 (100.0%)  PROGRAM TOTALS
--------------------------------------------------------------------------------
Ir  file:function
--------------------------------------------------------------------------------
1,340,703,711 ( 5.58%)  ./malloc/malloc.c:_int_free
825,298,533 ( 3.43%)  ./malloc/malloc.c:malloc
759,434,193 ( 3.16%)  ???:computeKnownBits(llvm::Value const*, llvm::APInt const&, llvm::KnownBits&, unsigned int, (anonymous namespace)::Query const&)
601,879,524 ( 2.50%)  ./malloc/malloc.c:_int_malloc
521,772,092 ( 2.17%)  ???:(anonymous namespace)::LazyValueInfoImpl::getEdgeValue(llvm::Value*, llvm::BasicBlock*, llvm::BasicBlock*, llvm::Instruction*) [clone .llvm.4316243980339171764]
444,357,732 ( 1.85%)  ./malloc/malloc.c:free
388,756,359 ( 1.62%)  ???:(anonymous namespace)::LazyValueInfoImpl::solve() [clone .llvm.4316243980339171764]
363,844,985 ( 1.51%)  ???:computeKnownBits(llvm::Value const*, llvm::KnownBits&, unsigned int, (anonymous namespace)::Query const&) [clone .llvm.15619146473165121143]
350,532,564 ( 1.46%)  ???:llvm::InstCombinerImpl::run()
323,607,905 ( 1.35%)  ???:computeKnownBitsFromOperator(llvm::Operator const*, llvm::APInt const&, llvm::KnownBits&, unsigned int, (anonymous namespace)::Query const&)
251,677,585 ( 1.05%)  ???:llvm::SelectionDAG::Combine(llvm::CombineLevel, llvm::AAResults*, llvm::CodeGenOpt::Level)
239,226,489 ( 0.99%)  ???:llvm::FPPassManager::runOnFunction(llvm::Function&)
225,459,197 ( 0.94%)  ???:llvm::KnownBits::mul(llvm::KnownBits const&, llvm::KnownBits const&)
222,887,331 ( 0.93%)  ???:computeKnownBitsFromAssume(llvm::Value const*, llvm::KnownBits&, unsigned int, (anonymous namespace)::Query const&)
221,563,966 ( 0.92%)  ???:llvm::ValueHandleBase::AddToUseList()
220,455,819 ( 0.92%)  ???:llvm::DataLayout::getTypeSizeInBits(llvm::Type*) const
209,503,165 ( 0.87%)  ???:combineInstructionsOverFunction(llvm::Function&, llvm::InstCombineWorklist&, llvm::AAResults*, llvm::AssumptionCache&, llvm::TargetLibraryInfo&, llvm::TargetTransformInfo&, llvm::DominatorTree&, llvm::OptimizationRemarkEmitter&, llvm::BlockFrequencyInfo*, llvm::ProfileSummaryInfo*, unsigned int, llvm::LoopInfo*)
191,683,926 ( 0.80%)  ???:llvm::BasicAAResult::alias(llvm::MemoryLocation const&, llvm::MemoryLocation const&, llvm::AAQueryInfo&)
183,789,969 ( 0.76%)  /tmp/gcc-build/x86_64-unknown-linux-gnu/libstdc++-v3/libsupc++/../../../../gcc-5.5.0/libstdc++-v3/libsupc++/new_op.cc:operator new(unsigned long)
171,511,725 ( 0.71%)  ???:bool llvm::DenseMapBase*, llvm::DenseMapInfo<(anonymous namespace)::SimpleValue>, llvm::detail::DenseMapPair<(anonymous namespace)::SimpleValue, llvm::ScopedHashTableVal<(anonymous namespace)::SimpleValue, llvm::Value*>*> >, (anonymous namespace)::SimpleValue, llvm::ScopedHashTableVal<(anonymous namespace)::SimpleValue, llvm::Value*>*, llvm::DenseMapInfo<(anonymous namespace)::SimpleValue>, llvm::detail::DenseMapPair<(anonymous namespace)::SimpleValue, llvm::ScopedHashTableVal<(anonymous namespace)::SimpleValue, llvm::Value*>*> >::LookupBucketFor<(anonymous namespace)::SimpleValue>((anonymous namespace)::SimpleValue const&, llvm::detail::DenseMapPair<(anonymous namespace)::SimpleValue, llvm::ScopedHashTableVal<(anonymous namespace)::SimpleValue, llvm::Value*>*> const*&) const
137,825,480 ( 0.57%)  ???:runCVP(llvm::Module&) [clone .llvm.11785992503873176614]
120,374,520 ( 0.50%)  ???:programUndefinedIfUndefOrPoison(llvm::Value const*, bool) [clone .llvm.15619146473165121143]
118,950,623 ( 0.49%)  ???:llvm::DataLayout::getAlignment(llvm::Type*, bool) const
118,473,724 ( 0.49%)  ???:llvm::InstCombinerImpl::SimplifyDemandedUseBits(llvm::Value*, llvm::APInt, llvm::KnownBits&, unsigned int, llvm::Instruction*)
97,823,151 ( 0.41%)  ???:isKnownNonZero(llvm::Value const*, llvm::APInt const&, unsigned int, (anonymous namespace)::Query const&) [clone .llvm.15619146473165121143]
92,656,504 ( 0.39%)  ./string/../sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S:__memcpy_avx_unaligned_erms
88,695,501 ( 0.37%)  ???:llvm::AnalysisManager::getResultImpl(llvm::AnalysisKey*, llvm::Function&)
88,107,211 ( 0.37%)  ???:llvm::SimplifyInstruction(llvm::Instruction*, llvm::SimplifyQuery const&, llvm::OptimizationRemarkEmitter*)
84,359,093 ( 0.35%)  ???:llvm::KnownBits::lshr(llvm::KnownBits const&, llvm::KnownBits const&)
83,942,151 ( 0.35%)  ???:llvm::AnalysisManager::invalidate(llvm::Function&, llvm::PreservedAnalyses const&)
78,013,598 ( 0.32%)  ???:(anonymous namespace)::MachineCopyPropagation::runOnMachineFunction(llvm::MachineFunction&)
74,693,805 ( 0.31%)  ???:llvm::InstCombinerImpl::visitCallInst(llvm::CallInst&)
74,125,488 ( 0.31%)  ???:llvm::DomTreeBuilder::SemiNCAInfo >::CalculateFromScratch(llvm::DominatorTreeBase&, llvm::DomTreeBuilder::SemiNCAInfo >::BatchUpdateInfo*)
72,906,164 ( 0.30%)  ???:llvm::SelectionDAGISel::SelectCodeCommon(llvm::SDNode*, unsigned char const*, unsigned int)
72,822,471 ( 0.30%)  ./string/../sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S:__memset_avx2_erms
70,459,528 ( 0.29%)  ???:llvm::InterferenceCache::Entry::update(unsigned int)
69,585,593 ( 0.29%)  ./malloc/malloc.c:malloc_consolidate
69,012,126 ( 0.29%)  ???:SimplifyICmpInst(unsigned int, llvm::Value*, llvm::Value*, llvm::SimplifyQuery const&, unsigned int) [clone .llvm.1619516508949622737]
68,566,565 ( 0.29%)  ???:llvm::isNonEscapingLocalObject(llvm::Value const*, llvm::SmallDenseMap, llvm::detail::DenseMapPair >*)
67,575,142 ( 0.28%)  ???:llvm::TargetLoweringBase::getTypeConversion(llvm::LLVMContext&, llvm::EVT) const
65,631,977 ( 0.27%)  ???:llvm::LiveVariables::runOnBlock(llvm::MachineBasicBlock*, unsigned int)
63,160,033 ( 0.26%)  ???:llvm::SelectionDAG::computeKnownBits(llvm::SDValue, llvm::APInt const&, unsigned int) const
60,988,422 ( 0.25%)  ???:llvm::InstCombinerImpl::visitICmpInst(llvm::ICmpInst&)
60,289,421 ( 0.25%)  ???:llvm::GVN::processBlock(llvm::BasicBlock*)
60,209,466 ( 0.25%)  ???:llvm::KnownBits::computeForAddSub(bool, bool, llvm::KnownBits const&, llvm::KnownBits)
59,553,452 ( 0.25%)  ???:llvm::removeUnreachableBlocks(llvm::Function&, llvm::DomTreeUpdater*, llvm::MemorySSAUpdater*)
56,709,090 ( 0.24%)  ???:llvm::SmallDenseMap, llvm::detail::DenseMapPair >::grow(unsigned int)
56,235,199 ( 0.23%)  ???:llvm::DemandedBits::isInstructionDead(llvm::Instruction*)
53,887,813 ( 0.22%)  ./malloc/malloc.c:unlink_chunk.constprop.0
53,668,675 ( 0.22%)  ./string/../sysdeps/x86_64/multiarch/memcmp-avx2-movbe.S:__memcmp_avx2_movbe
53,424,240 ( 0.22%)  /tmp/gcc-build/x86_64-unknown-linux-gnu/libstdc++-v3/libsupc++/../../../../gcc-5.5.0/libstdc++-v3/libsupc++/new_opv.cc:operator new[](unsigned long)
53,158,609 ( 0.22%)  ???:(anonymous namespace)::LazyValueInfoImpl::getValueInBlock(llvm::Value*, llvm::BasicBlock*, llvm::Instruction*) [clone .llvm.4316243980339171764]
52,901,965 ( 0.22%)  ???:llvm::Type::getPrimitiveSizeInBits() const
52,888,164 ( 0.22%)  ???:(anonymous namespace)::RAGreedy::tryAssign(llvm::LiveInterval&, llvm::AllocationOrder&, llvm::SmallVectorImpl&, llvm::SmallSet > const&)
52,609,966 ( 0.22%)  ???:llvm::X86TTIImpl::getCastInstrCost(unsigned int, llvm::Type*, llvm::Type*, llvm::TargetTransformInfo::CastContextHint, llvm::TargetTransformInfo::TargetCostKind, llvm::Instruction const*)
52,022,278 ( 0.22%)  ???:llvm::AttributeList::addAttributes(llvm::LLVMContext&, unsigned int, llvm::AttrBuilder const&) const
51,779,126 ( 0.22%)  ???:llvm::LiveIntervals::HMEditor::updateAllRanges(llvm::MachineInstr*)
51,524,390 ( 0.21%)  ???:(anonymous namespace)::EarlyCSE::run() [clone .llvm.7062997131228810369]
50,954,967 ( 0.21%)  ???:llvm::Value::stripAndAccumulateConstantOffsets(llvm::DataLayout const&, llvm::APInt&, bool, llvm::function_ref) const
49,720,083 ( 0.21%)  ???:llvm::APInt::initSlowCase(unsigned long, bool)
48,428,117 ( 0.20%)  ???:(anonymous namespace)::Verifier::visitInstruction(llvm::Instruction&)
48,347,814 ( 0.20%)  ???:llvm::SmallPtrSetImplBase::insert_imp_big(void const*)
47,975,734 ( 0.20%)  ???:llvm::ConstantRange::makeExactICmpRegion(llvm::CmpInst::Predicate, llvm::APInt const&)
47,594,157 ( 0.20%)  ???:(anonymous namespace)::DAGCombiner::combine(llvm::SDNode*)
45,925,015 ( 0.19%)  ???:llvm::TargetLibraryInfoImpl::getLibFunc(llvm::Function const&, llvm::LibFunc&) const
45,426,570 ( 0.19%)  ???:llvm::APInt::operator<<=(unsigned int)
45,204,519 ( 0.19%)  ???:llvm::LivePhysRegs::stepBackward(llvm::MachineInstr const&)
45,010,363 ( 0.19%)  ???:(anonymous namespace)::eliminateDeadStores(llvm::Function&, llvm::AAResults&, llvm::MemorySSA&, llvm::DominatorTree&, llvm::PostDominatorTree&, llvm::TargetLibraryInfo const&, llvm::LoopInfo const&) [clone .llvm.5769264623867638418]
44,830,687 ( 0.19%)  /usr/home/liquid/rust/worktree-benchmarking/compiler/rustc_trait_selection/src/traits/fulfill.rs:>::process_obligations::>
44,777,780 ( 0.19%)  ???:???
44,494,608 ( 0.19%)  ???:llvm::getObjectSize(llvm::Value const*, unsigned long&, llvm::DataLayout const&, llvm::TargetLibraryInfo const*, llvm::ObjectSizeOpts)
43,982,752 ( 0.18%)  ???:isGuaranteedNotToBeUndefOrPoison(llvm::Value const*, llvm::AssumptionCache*, llvm::Instruction const*, llvm::DominatorTree const*, unsigned int, bool) [clone .llvm.15619146473165121143]
43,435,634 ( 0.18%)  ???:llvm::InlineFunction(llvm::CallBase&, llvm::InlineFunctionInfo&, llvm::AAResults*, bool, llvm::Function*)
43,151,421 ( 0.18%)  ???:llvm::DomTreeBuilder::SemiNCAInfo >::runSemiNCA(llvm::DominatorTreeBase&, unsigned int)
41,359,766 ( 0.17%)  ???:llvm::PMDataManager::verifyPreservedAnalysis(llvm::Pass*)
39,485,522 ( 0.16%)  ???:(anonymous namespace)::PruningFunctionCloner::CloneBlock(llvm::BasicBlock const*, llvm::ilist_iterator, false, true>, std::vector >&)
39,314,608 ( 0.16%)  ???:llvm::simplifyCFG(llvm::BasicBlock*, llvm::TargetTransformInfo const&, llvm::DomTreeUpdater*, llvm::SimplifyCFGOptions const&, llvm::ArrayRef)
38,771,931 ( 0.16%)  ???:llvm::SROA::runOnAlloca(llvm::AllocaInst&)
38,717,970 ( 0.16%)  ???:llvm::InstCombinerImpl::visitAdd(llvm::BinaryOperator&)
38,674,842 ( 0.16%)  /usr/home/liquid/rust/worktree-benchmarking/compiler/rustc_data_structures/src/obligation_forest/mod.rs:>::process_obligations::>
38,326,344 ( 0.16%)  ???:llvm::ScalarEvolution::getSCEV(llvm::Value*)
38,042,272 ( 0.16%)  ???:llvm::BitstreamCursor::readRecord(unsigned int, llvm::SmallVectorImpl&, llvm::StringRef*)
37,796,062 ( 0.16%)  ???:llvm::ScheduleDAGSDNodes::BuildSchedUnits()
37,785,071 ( 0.16%)  ???:llvm::MachineInstr::addOperand(llvm::MachineFunction&, llvm::MachineOperand const&)
37,579,787 ( 0.16%)  ???:getValueFromCondition(llvm::Value*, llvm::Value*, bool)
37,305,821 ( 0.16%)  ???:(anonymous namespace)::LazyValueInfoImpl::getBlockValue(llvm::Value*, llvm::BasicBlock*)
36,826,372 ( 0.15%)  ???:(anonymous namespace)::DeadMachineInstructionElim::eliminateDeadMI(llvm::MachineFunction&)
36,604,890 ( 0.15%)  ???:llvm::APInt::reverseBits() const
36,260,519 ( 0.15%)  ???:(anonymous namespace)::SimplifyCFGOpt::simplifyCondBranch(llvm::BranchInst*, llvm::IRBuilder&)
36,114,195 ( 0.15%)  ???:llvm::Type::isSizedDerivedType(llvm::SmallPtrSetImpl*) const
35,822,616 ( 0.15%)  ???:llvm::computeConstantRange(llvm::Value const*, bool, llvm::AssumptionCache*, llvm::Instruction const*, unsigned int)
35,705,941 ( 0.15%)  ???:llvm::SimplifyGEPInst(llvm::Type*, llvm::ArrayRef, llvm::SimplifyQuery const&)
34,432,743 ( 0.14%)  ???:llvm::InstCombinerImpl::visitLoadInst(llvm::LoadInst&)
34,040,930 ( 0.14%)  ???:llvm::ConstantRange::multiply(llvm::ConstantRange const&) const
33,561,176 ( 0.14%)  ???:llvm::InstCombinerImpl::visitStoreInst(llvm::StoreInst&)
33,269,765 ( 0.14%)  ???:llvm::MemorySSA::buildMemorySSA(llvm::BatchAAResults&)
33,138,404 ( 0.14%)  ???:llvm::FindFunctionBackedges(llvm::Function const&, llvm::SmallVectorImpl >&)
32,141,177 ( 0.13%)  ???:llvm::GVN::performPRE(llvm::Function&)
31,670,377 ( 0.13%)  ./string/../sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S:__memset_avx2_unaligned_erms
31,626,520 ( 0.13%)  ./elf/dl-lookup.c:_dl_lookup_symbol_x
31,383,163 ( 0.13%)  ???:updateCGAndAnalysisManagerForPass(llvm::LazyCallGraph&, llvm::LazyCallGraph::SCC&, llvm::LazyCallGraph::Node&, llvm::AnalysisManager&, llvm::CGSCCUpdateResult&, llvm::AnalysisManager&, bool) [clone .llvm.5426518467876156712]
30,840,854 ( 0.13%)  ???:llvm::TargetLowering::SimplifyDemandedBits(llvm::SDValue, llvm::APInt const&, llvm::APInt const&, llvm::KnownBits&, llvm::TargetLowering::TargetLoweringOpt&, unsigned int, bool) const
30,822,262 ( 0.13%)  ???:(anonymous namespace)::AggressiveDeadCodeElimination::performDeadCodeElimination()
30,767,255 ( 0.13%)  ???:llvm::SCCPInstVisitor::solve()
30,561,066 ( 0.13%)  ???:llvm::AttributeSetNode::get(llvm::LLVMContext&, llvm::AttrBuilder const&)
30,493,397 ( 0.13%)  ???:llvm::ScalarEvolution::getAddExpr(llvm::SmallVectorImpl&, llvm::SCEV::NoWrapFlags, unsigned int)
30,462,046 ( 0.13%)  ???:(anonymous namespace)::CVPLatticeFunc::ComputeInstructionState(llvm::Instruction&, llvm::DenseMap, llvm::PointerIntPairInfo > >, (anonymous namespace)::CVPLatticeVal, llvm::DenseMapInfo, llvm::PointerIntPairInfo > > >, llvm::detail::DenseMapPair, llvm::PointerIntPairInfo > >, (anonymous namespace)::CVPLatticeVal> >&, llvm::SparseSolver, llvm::PointerIntPairInfo > >, (anonymous namespace)::CVPLatticeVal, llvm::LatticeKeyInfo, llvm::PointerIntPairInfo > > > >&)
30,092,560 ( 0.13%)  ???:llvm::DenseMapBase, std::unique_ptr<(anonymous namespace)::LazyValueInfoCache::BlockCacheEntry, std::default_delete<(anonymous namespace)::LazyValueInfoCache::BlockCacheEntry> >, llvm::DenseMapInfo >, llvm::detail::DenseMapPair, std::unique_ptr<(anonymous namespace)::LazyValueInfoCache::BlockCacheEntry, std::default_delete<(anonymous namespace)::LazyValueInfoCache::BlockCacheEntry> > > >, llvm::PoisoningVH, std::unique_ptr<(anonymous namespace)::LazyValueInfoCache::BlockCacheEntry, std::default_delete<(anonymous namespace)::LazyValueInfoCache::BlockCacheEntry> >, llvm::DenseMapInfo >, llvm::detail::DenseMapPair, std::unique_ptr<(anonymous namespace)::LazyValueInfoCache::BlockCacheEntry, std::default_delete<(anonymous namespace)::LazyValueInfoCache::BlockCacheEntry> > > >::destroyAll() [clone .llvm.4316243980339171764]
30,003,106 ( 0.12%)  ???:llvm::TargetTransformInfo::Model::getUserCost(llvm::User const*, llvm::ArrayRef, llvm::TargetTransformInfo::TargetCostKind)
29,874,896 ( 0.12%)  ???:llvm::APInt::lshrInPlace(unsigned int)
29,345,190 ( 0.12%)  ???:llvm::AAResults::getModRefInfo(llvm::Instruction const*, llvm::Optional const&, llvm::AAQueryInfo&)
28,483,355 ( 0.12%)  /usr/home/liquid/rust/worktree-benchmarking/compiler/rustc_infer/src/infer/mod.rs:>::process_obligations::>
28,471,851 ( 0.12%)  ???:llvm::Intrinsic::getDeclaration(llvm::Module*, unsigned int, llvm::ArrayRef)
27,862,632 ( 0.12%)  ???:(anonymous namespace)::JoinVals::computeAssignment(unsigned int, (anonymous namespace)::JoinVals&)
27,583,882 ( 0.11%)  /usr/home/liquid/.cargo/registry/src/github.com-1ecc6299db9ec823/hashbrown-0.12.0/src/raw/mod.rs:, (), core::hash::BuildHasherDefault>>::from_hash::>::{closure#0}>
27,201,933 ( 0.11%)  ???:llvm::LiveRegMatrix::checkInterference(llvm::LiveInterval&, llvm::MCRegister)
27,009,809 ( 0.11%)  ???:llvm::InstrEmitter::EmitCopyFromReg(llvm::SDNode*, unsigned int, bool, bool, llvm::Register, llvm::DenseMap, llvm::detail::DenseMapPair >&)
26,783,246 ( 0.11%)  ???:llvm::LiveRangeCalc::findReachingDefs(llvm::LiveRange&, llvm::MachineBasicBlock&, llvm::SlotIndex, unsigned int, llvm::ArrayRef)
26,782,348 ( 0.11%)  ???:llvm::FoldingSetBase::FindNodeOrInsertPos(llvm::FoldingSetNodeID const&, void*&, llvm::FoldingSetBase::FoldingSetInfo const&)
26,747,837 ( 0.11%)  ???:llvm::DAGTypeLegalizer::run()
26,428,471 ( 0.11%)  ???:llvm::InstCombinerImpl::visitTrunc(llvm::TruncInst&)
26,311,412 ( 0.11%)  ???:llvm::LoopBase::verifyLoop() const
26,181,174 ( 0.11%)  ???:(anonymous namespace)::RegisterCoalescer::joinCopy(llvm::MachineInstr*, bool&)
25,995,377 ( 0.11%)  ???:llvm::RegPressureTracker::recede(llvm::RegisterOperands const&, llvm::SmallVectorImpl*)
25,843,684 ( 0.11%)  ???:ComputeNumSignBitsImpl(llvm::Value const*, llvm::APInt const&, unsigned int, (anonymous namespace)::Query const&) [clone .llvm.15619146473165121143]
25,613,189 ( 0.11%)  ???:llvm::LiveIntervalUnion::Query::collectInterferingVRegs(unsigned int)
25,525,271 ( 0.11%)  ???:llvm::SelectionDAG::getConstant(llvm::ConstantInt const&, llvm::SDLoc const&, llvm::EVT, bool, bool)
25,412,086 ( 0.11%)  ???:llvm::coro::declaresIntrinsics(llvm::Module const&, std::initializer_list)
25,294,697 ( 0.11%)  ???:llvm::LiveRange::extendInBlock(llvm::ArrayRef, llvm::SlotIndex, llvm::SlotIndex)
25,231,973 ( 0.10%)  ???:(anonymous namespace)::RAGreedy::growRegion((anonymous namespace)::RAGreedy::GlobalSplitCandidate&)
25,197,460 ( 0.10%)  ./elf/dl-lookup.c:do_lookup_x
25,009,101 ( 0.10%)  ???:(anonymous namespace)::SelectionDAGLegalize::LegalizeOp(llvm::SDNode*) [clone .llvm.8386621111310650999]
24,985,294 ( 0.10%)  ???:llvm::InstrEmitter::EmitMachineNode(llvm::SDNode*, bool, bool, llvm::DenseMap, llvm::detail::DenseMapPair >&)
24,900,504 ( 0.10%)  ???:llvm::SelectionDAG::Legalize()
24,684,899 ( 0.10%)  ???:llvm::MemoryDependenceResults::getNonLocalPointerDepFromBB(llvm::Instruction*, llvm::PHITransAddr const&, llvm::MemoryLocation const&, bool, llvm::BasicBlock*, llvm::SmallVectorImpl&, llvm::DenseMap, llvm::detail::DenseMapPair >&, bool, bool)
24,459,426 ( 0.10%)  ???:llvm::BasicAAResult::getModRefInfo(llvm::CallBase const*, llvm::MemoryLocation const&, llvm::AAQueryInfo&)
24,206,984 ( 0.10%)  ???:(anonymous namespace)::BitcodeReader::parseFunctionBody(llvm::Function*)
24,178,364 ( 0.10%)  ???:llvm::ScalarEvolution::forgetValue(llvm::Value*)
24,132,777 ( 0.10%)  ???:llvm::VirtRegAuxInfo::weightCalcHelper(llvm::LiveInterval&, llvm::SlotIndex*, llvm::SlotIndex*)
--------------------------------------------------------------------------------
-- Auto-annotated source: /usr/home/liquid/.cargo/registry/src/github.com-1ecc6299db9ec823/hashbrown-0.12.0/src/raw/mod.rs
--------------------------------------------------------------------------------
Ir

-- line 111 ----------------------------------------
. const EMPTY: u8 = 0b1111_1111;
.
. /// Control byte value for a deleted bucket.
. const DELETED: u8 = 0b1000_0000;
.
. /// Checks whether a control byte represents a full bucket (top bit is clear).
. #[inline]
. fn is_full(ctrl: u8) -> bool {
4,062,483 ( 0.02%)     ctrl & 0x80 == 0
. }
.
. /// Checks whether a control byte represents a special value (top bit is set).
. #[inline]
. fn is_special(ctrl: u8) -> bool {
.     ctrl & 0x80 != 0
. }
.
. /// Checks whether a special control value is EMPTY (just check 1 bit).
. #[inline]
. fn special_is_empty(ctrl: u8) -> bool {
.     debug_assert!(is_special(ctrl));
332,122 ( 0.00%)     ctrl & 0x01 != 0
. }
.
. /// Primary hash function, used to select the initial bucket to probe from.
. #[inline]
. #[allow(clippy::cast_possible_truncation)]
. fn h1(hash: u64) -> usize {
.     // On 32-bit platforms we simply ignore the higher hash bits.
.     hash as usize
-- line 140 ----------------------------------------
-- line 143 ----------------------------------------
. /// Secondary hash function, saved in the low 7 bits of the control byte.
. #[inline]
. #[allow(clippy::cast_possible_truncation)]
. fn h2(hash: u64) -> u8 {
.     // Grab the top 7 bits of the hash. While the hash is normally a full 64-bit
.     // value, some hash functions (such as FxHash) produce a usize result
.     // instead, which means that the top 32 bits are 0 on 32-bit platforms.
.     let hash_len = usize::min(mem::size_of::(), mem::size_of::());
82,536,533 ( 0.34%)     let top7 = hash >> (hash_len * 8 - 7);
.     (top7 & 0x7f) as u8 // truncation
. }
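[Editorial sketch, not part of the report: the two helpers above split a single 64-bit hash into the probe start (h1) and the 7-bit control tag (h2). A minimal stand-alone Rust version of that split; the 64-bucket mask is invented for the example.]

fn main() {
    let hash: u64 = 0xdead_beef_cafe_f00d;
    let bucket_mask: usize = 64 - 1; // 64 buckets, power of two (assumed)

    // h1: the low bits select the bucket where probing starts.
    let h1 = (hash as usize) & bucket_mask;

    // h2: the top 7 bits become the per-bucket control tag.
    let hash_len = usize::min(std::mem::size_of::<usize>(), std::mem::size_of::<u64>());
    let h2 = ((hash >> (hash_len * 8 - 7)) & 0x7f) as u8;

    // The tag's top bit is always clear, so is_full() reads it as FULL.
    assert_eq!(h2 & 0x80, 0);
    println!("start bucket = {}, control tag = {:#04x}", h1, h2);
}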
.
. /// Probe sequence based on triangular numbers, which is guaranteed (since our
. /// table size is a power of two) to visit every group of elements exactly once.
. ///
. /// A triangular probe has us jump by 1 more group every time. So first we
. /// jump by 1 group (meaning we just continue our linear scan), then 2 groups
-- line 159 ----------------------------------------
-- line 170 ----------------------------------------
.     #[inline]
.     fn move_next(&mut self, bucket_mask: usize) {
.         // We should have found an empty bucket by now and ended the probe.
.         debug_assert!(
.             self.stride <= bucket_mask,
.             "Went past end of probe sequence"
.         );
.
547,844 ( 0.00%)         self.stride += Group::WIDTH;
547,844 ( 0.00%)         self.pos += self.stride;
477,920 ( 0.00%)         self.pos &= bucket_mask;
.     }
. }
.
. /// Returns the number of buckets needed to hold the given number of items,
. /// taking the maximum load factor into account.
. ///
. /// Returns `None` if an overflow occurs.
. // Workaround for emscripten bug emscripten-core/emscripten-fastcomp#258
. #[cfg_attr(target_os = "emscripten", inline(never))]
. #[cfg_attr(not(target_os = "emscripten"), inline)]
. fn capacity_to_buckets(cap: usize) -> Option {
.     debug_assert_ne!(cap, 0);
.
.     // For small tables we require at least 1 empty bucket so that lookups are
.     // guaranteed to terminate if an element doesn't exist in the table.
356,064 ( 0.00%)     if cap < 8 {
.         // We don't bother with a table size of 2 buckets since that can only
.         // hold a single element. Instead we skip directly to a 4 bucket table
.         // which can hold 3 elements.
788,200 ( 0.00%)         return Some(if cap < 4 { 4 } else { 8 });
.     }
.
.     // Otherwise require 1/8 buckets to be empty (87.5% load)
.     //
.     // Be careful when modifying this, calculate_layout relies on the
.     // overflow check here.
122,352 ( 0.00%)     let adjusted_cap = cap.checked_mul(8)? / 7;
.
.     // Any overflows will have been caught by the checked_mul. Also, any
.     // rounding errors from the division above will be cleaned up by
.     // next_power_of_two (which can't overflow because of the previous division).
.     Some(adjusted_cap.next_power_of_two())
. }
.
. /// Returns the maximum effective capacity for the given bucket mask, taking
. /// the maximum load factor into account.
. #[inline]
. fn bucket_mask_to_capacity(bucket_mask: usize) -> usize {
914,009 ( 0.00%)     if bucket_mask < 8 {
.         // For tables with 1/2/4/8 buckets, we always reserve one empty slot.
.         // Keep in mind that the bucket mask is one less than the bucket count.
.         bucket_mask
.     } else {
.         // For larger tables we reserve 12.5% of the slots as empty.
151,260 ( 0.00%)         ((bucket_mask + 1) / 8) * 7
.     }
. }
.
. /// Helper which allows the max calculation for ctrl_align to be statically computed for each T
. /// while keeping the rest of `calculate_layout_for` independent of `T`
. #[derive(Copy, Clone)]
. struct TableLayout {
.     size: usize,
-- line 233 ----------------------------------------
-- line 246 ----------------------------------------
.
.     #[inline]
.     fn calculate_layout_for(self, buckets: usize) -> Option<(Layout, usize)> {
.         debug_assert!(buckets.is_power_of_two());
.
.         let TableLayout { size, ctrl_align } = self;
.         // Manual layout calculation since Layout methods are not yet stable.
.         let ctrl_offset =
737,735 ( 0.00%)             size.checked_mul(buckets)?.checked_add(ctrl_align - 1)? & !(ctrl_align - 1);
978,922 ( 0.00%)         let len = ctrl_offset.checked_add(buckets + Group::WIDTH)?;
.
.         Some((
.             unsafe { Layout::from_size_align_unchecked(len, ctrl_align) },
.             ctrl_offset,
.         ))
.     }
. }
.
-- line 263 ----------------------------------------
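[Editorial sketch, not library code: the size arithmetic in capacity_to_buckets and calculate_layout_for above, reproduced stand-alone. The 16-byte group width matches the SSE2 backend, but that is an assumption of this sketch.]

const GROUP_WIDTH: usize = 16; // assumed group width (SSE2 backend)

// Buckets needed for `cap` items at the 7/8 (87.5%) maximum load factor.
fn capacity_to_buckets(cap: usize) -> Option<usize> {
    if cap < 8 {
        return Some(if cap < 4 { 4 } else { 8 });
    }
    let adjusted_cap = cap.checked_mul(8)? / 7;
    Some(adjusted_cap.next_power_of_two())
}

// Byte offset of the control array: the data array rounded up to `ctrl_align`.
fn ctrl_offset(size: usize, buckets: usize, ctrl_align: usize) -> usize {
    (size * buckets + ctrl_align - 1) & !(ctrl_align - 1)
}

fn main() {
    assert_eq!(capacity_to_buckets(3), Some(4));
    assert_eq!(capacity_to_buckets(24), Some(32)); // 24 * 8 / 7 = 27, rounded up to 32

    // 32 buckets of 8-byte values, control bytes aligned to the group width:
    let off = ctrl_offset(8, 32, GROUP_WIDTH);
    let len = off + 32 + GROUP_WIDTH; // plus the trailing replicated group
    assert_eq!((off, len), (256, 304));
}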
-- line 337 ----------------------------------------
.     }
. }
. #[cfg_attr(feature = "inline-more", inline)]
. pub unsafe fn drop(&self) {
.     self.as_ptr().drop_in_place();
. }
. #[inline]
. pub unsafe fn read(&self) -> T {
1,434 ( 0.00%)     self.as_ptr().read()
. }
. #[inline]
. pub unsafe fn write(&self, val: T) {
.     self.as_ptr().write(val);
. }
. #[inline]
. pub unsafe fn as_ref<'a>(&self) -> &'a T {
.     &*self.as_ptr()
-- line 353 ----------------------------------------
-- line 422 ----------------------------------------
.     /// Creates a new empty hash table without allocating any memory, using the
.     /// given allocator.
.     ///
.     /// In effect this returns a table with exactly 1 bucket. However we can
.     /// leave the data pointer dangling since that bucket is never written to
.     /// due to our load factor forcing us to always have at least 1 free bucket.
.     #[inline]
.     pub fn new_in(alloc: A) -> Self {
6,570 ( 0.00%)         Self {
.             table: RawTableInner::new_in(alloc),
.             marker: PhantomData,
.         }
.     }
.
.     /// Allocates a new hash table with the given number of buckets.
.     ///
.     /// The control bytes are left uninitialized.
-- line 438 ----------------------------------------
-- line 440 ----------------------------------------
.     unsafe fn new_uninitialized(
.         alloc: A,
.         buckets: usize,
.         fallibility: Fallibility,
.     ) -> Result {
.         debug_assert!(buckets.is_power_of_two());
.
.         Ok(Self {
940 ( 0.00%)             table: RawTableInner::new_uninitialized(
.                 alloc,
.                 TableLayout::new::(),
.                 buckets,
.                 fallibility,
.             )?,
.             marker: PhantomData,
.         })
.     }
-- line 456 ----------------------------------------
-- line 458 ----------------------------------------
.     /// Attempts to allocate a new hash table with at least enough capacity
.     /// for inserting the given number of elements without reallocating.
.     fn fallible_with_capacity(
.         alloc: A,
.         capacity: usize,
.         fallibility: Fallibility,
.     ) -> Result {
.         Ok(Self {
28,620 ( 0.00%)             table: RawTableInner::fallible_with_capacity(
.                 alloc,
.                 TableLayout::new::(),
.                 capacity,
.                 fallibility,
.             )?,
.             marker: PhantomData,
.         })
.     }
-- line 474 ----------------------------------------
-- line 527 ----------------------------------------
.         debug_assert_ne!(self.table.bucket_mask, 0);
.         debug_assert!(index < self.buckets());
.         Bucket::from_base_index(self.data_end(), index)
.     }
.
.     /// Erases an element from the table without dropping it.
.     #[cfg_attr(feature = "inline-more", inline)]
.     #[deprecated(since = "0.8.1", note = "use erase or remove instead")]
65,350 ( 0.00%)     pub unsafe fn erase_no_drop(&mut self, item: &Bucket) {
65,350 ( 0.00%)         let index = self.bucket_index(item);
.         self.table.erase(index);
130,700 ( 0.00%)     }
.
.     /// Erases an element from the table, dropping it in place.
.     #[cfg_attr(feature = "inline-more", inline)]
.     #[allow(clippy::needless_pass_by_value)]
.     #[allow(deprecated)]
.     pub unsafe fn erase(&mut self, item: Bucket) {
.         // Erase the element from the table first since drop might panic.
57,746 ( 0.00%)         self.erase_no_drop(&item);
.         item.drop();
.     }
.
.     /// Finds and erases an element from the table, dropping it in place.
.     /// Returns true if an element was found.
.     #[cfg(feature = "raw")]
.     #[cfg_attr(feature = "inline-more", inline)]
.     pub fn erase_entry(&mut self, hash: u64, eq: impl FnMut(&T) -> bool) -> bool {
-- line 554 ----------------------------------------
-- line 563 ----------------------------------------
.         }
.     }
.
.     /// Removes an element from the table, returning it.
.     #[cfg_attr(feature = "inline-more", inline)]
.     #[allow(clippy::needless_pass_by_value)]
.     #[allow(deprecated)]
.     pub unsafe fn remove(&mut self, item: Bucket) -> T {
72,954 ( 0.00%)         self.erase_no_drop(&item);
468 ( 0.00%)         item.read()
.     }
.
.     /// Finds and removes an element from the table, returning it.
.     #[cfg_attr(feature = "inline-more", inline)]
1,636,350 ( 0.01%)     pub fn remove_entry(&mut self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option {
.         // Avoid `Option::map` because it bloats LLVM IR.
4,580 ( 0.00%)         match self.find(hash, eq) {
34,389 ( 0.00%)             Some(bucket) => Some(unsafe { self.remove(bucket) }),
353,753 ( 0.00%)             None => None,
.         }
2,224,645 ( 0.01%)     }
.
.     /// Marks all table buckets as empty without dropping their contents.
.     #[cfg_attr(feature = "inline-more", inline)]
.     pub fn clear_no_drop(&mut self) {
.         self.table.clear_no_drop();
.     }
.
.     /// Removes all elements from the table without freeing the backing memory.
.     #[cfg_attr(feature = "inline-more", inline)]
.     pub fn clear(&mut self) {
.         // Ensure that the table is reset even if one of the drops panic
.         let mut self_ = guard(self, |self_| self_.clear_no_drop());
.         unsafe {
1 ( 0.00%)             self_.drop_elements();
.         }
.     }
.
7 ( 0.00%)     unsafe fn drop_elements(&mut self) {
44,796 ( 0.00%)         if mem::needs_drop::() && !self.is_empty() {
.             for item in self.iter() {
.                 item.drop();
.             }
.         }
8 ( 0.00%)     }
.
.     /// Shrinks the table to fit `max(self.len(), min_size)` elements.
.     #[cfg_attr(feature = "inline-more", inline)]
.     pub fn shrink_to(&mut self, min_size: usize, hasher: impl Fn(&T) -> u64) {
.         // Calculate the minimal number of elements that we need to reserve
.         // space for.
.         let min_size = usize::max(self.table.items, min_size);
.         if min_size == 0 {
-- line 615 ----------------------------------------
-- line 642 ----------------------------------------
.             }
.         }
.     }
.
.     /// Ensures that at least `additional` items can be inserted into the table
.     /// without reallocation.
.     #[cfg_attr(feature = "inline-more", inline)]
.     pub fn reserve(&mut self, additional: usize, hasher: impl Fn(&T) -> u64) {
924,836 ( 0.00%)         if additional > self.table.growth_left {
.             // Avoid `Result::unwrap_or_else` because it bloats LLVM IR.
647,661 ( 0.00%)             if self
.                 .reserve_rehash(additional, hasher, Fallibility::Infallible)
.                 .is_err()
.             {
.                 unsafe { hint::unreachable_unchecked() }
.             }
.         }
.     }
.
-- line 660 ----------------------------------------
-- line 671 ----------------------------------------
.         } else {
.             Ok(())
.         }
.     }
.
.     /// Out-of-line slow path for `reserve` and `try_reserve`.
.     #[cold]
.     #[inline(never)]
1,411,385 ( 0.01%)     fn reserve_rehash(
.         &mut self,
.         additional: usize,
.         hasher: impl Fn(&T) -> u64,
.         fallibility: Fallibility,
.     ) -> Result<(), TryReserveError> {
.         unsafe {
.             self.table.reserve_rehash_inner(
.                 additional,
-- line 687 ----------------------------------------
-- line 690 ----------------------------------------
.                 TableLayout::new::(),
.                 if mem::needs_drop::() {
.                     Some(mem::transmute(ptr::drop_in_place:: as unsafe fn(*mut T)))
.                 } else {
.                     None
.                 },
.             )
.         }
1,082,680 ( 0.00%)     }
.
.     /// Allocates a new table of a different size and moves the contents of the
.     /// current table into it.
.     fn resize(
.         &mut self,
.         capacity: usize,
.         hasher: impl Fn(&T) -> u64,
.         fallibility: Fallibility,
-- line 706 ----------------------------------------
-- line 714 ----------------------------------------
.             )
.         }
.     }
.
.     /// Inserts a new element into the table, and returns its raw bucket.
.     ///
.     /// This does not check if the given element already exists in the table.
.     #[cfg_attr(feature = "inline-more", inline)]
5,618,140 ( 0.02%)     pub fn insert(&mut self, hash: u64, value: T, hasher: impl Fn(&T) -> u64) -> Bucket {
.         unsafe {
.             let mut index = self.table.find_insert_slot(hash);
.
.             // We can avoid growing the table once we have reached our load
.             // factor if we are replacing a tombstone. This works since the
.             // number of EMPTY slots does not change in this case.
13,970 ( 0.00%)             let old_ctrl = *self.table.ctrl(index);
3,892,272 ( 0.02%)             if unlikely(self.table.growth_left == 0 && special_is_empty(old_ctrl)) {
.                 self.reserve(1, hasher);
.                 index = self.table.find_insert_slot(hash);
.             }
.
.             self.table.record_item_insert_at(index, old_ctrl, hash);
.
.             let bucket = self.bucket(index);
4 ( 0.00%)             bucket.write(value);
.             bucket
.         }
4,134,641 ( 0.02%)     }
.
.     /// Attempts to insert a new element without growing the table and return its raw bucket.
.     ///
.     /// Returns an `Err` containing the given element if inserting it would require growing the
.     /// table.
.     ///
.     /// This does not check if the given element already exists in the table.
.     #[cfg(feature = "raw")]
-- line 749 ----------------------------------------
-- line 760 ----------------------------------------
.             }
.         }
.     }
.
.     /// Inserts a new element into the table, and returns a mutable reference to it.
.     ///
.     /// This does not check if the given element already exists in the table.
.     #[cfg_attr(feature = "inline-more", inline)]
1,293,448 ( 0.01%)     pub fn insert_entry(&mut self, hash: u64, value: T, hasher: impl Fn(&T) -> u64) -> &mut T {
68 ( 0.00%)         unsafe { self.insert(hash, value, hasher).as_mut() }
970,086 ( 0.00%)     }
.
.     /// Inserts a new element into the table, without growing the table.
.     ///
.     /// There must be enough space in the table to insert the new element.
.     ///
.     /// This does not check if the given element already exists in the table.
.     #[cfg_attr(feature = "inline-more", inline)]
.     #[cfg(any(feature = "raw", feature = "rustc-internal-api"))]
5,125 ( 0.00%)     pub unsafe fn insert_no_grow(&mut self, hash: u64, value: T) -> Bucket {
922,178 ( 0.00%)         let (index, old_ctrl) = self.table.prepare_insert_slot(hash);
58,107 ( 0.00%)         let bucket = self.table.bucket(index);
.
.         // If we are replacing a DELETED entry then we don't need to update
.         // the load counter.
1,896,394 ( 0.01%)         self.table.growth_left -= special_is_empty(old_ctrl) as usize;
.
.         bucket.write(value);
1,552,463 ( 0.01%)         self.table.items += 1;
.         bucket
10,093 ( 0.00%)     }
.
.     /// Temporary removes a bucket, applying the given function to the removed
.     /// element and optionally put back the returned value in the same bucket.
.     ///
.     /// Returns `true` if the bucket still contains an element
.     ///
.     /// This does not check if the given bucket is actually occupied.
.     #[cfg_attr(feature = "inline-more", inline)]
-- line 798 ----------------------------------------
-- line 813 ----------------------------------------
.             true
.         } else {
.             false
.         }
.     }
.
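[Editorial sketch, not part of the report: the comments in insert() and insert_no_grow() above explain why overwriting a DELETED slot does not consume capacity. A small self-contained model of that bookkeeping; all names here are invented for the sketch.]

const EMPTY: u8 = 0b1111_1111;
const DELETED: u8 = 0b1000_0000;

// The low bit distinguishes EMPTY (0xff) from DELETED (0x80).
fn special_is_empty(ctrl: u8) -> bool {
    ctrl & 0x01 != 0
}

struct LoadCounters {
    items: usize,
    growth_left: usize,
}

impl LoadCounters {
    fn record_insert_at(&mut self, old_ctrl: u8) {
        // Only filling an EMPTY slot reduces the remaining head-room;
        // reusing a tombstone leaves the number of EMPTY slots unchanged.
        self.growth_left -= special_is_empty(old_ctrl) as usize;
        self.items += 1;
    }
}

fn main() {
    let mut c = LoadCounters { items: 5, growth_left: 2 };
    c.record_insert_at(DELETED); // tombstone reuse: growth_left stays 2
    c.record_insert_at(EMPTY);   // fresh slot: growth_left drops to 1
    assert_eq!((c.items, c.growth_left), (7, 1));
}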
.     /// Searches for an element in the table.
.     #[inline]
66,630 ( 0.00%)     pub fn find(&self, hash: u64, mut eq: impl FnMut(&T) -> bool) -> Option> {
55,811 ( 0.00%)         let result = self.table.find_inner(hash, &mut |index| unsafe {
61,316 ( 0.00%)             eq(self.bucket(index).as_ref())
22,188 ( 0.00%)         });
.
.         // Avoid `Option::map` because it bloats LLVM IR.
.         match result {
7,689 ( 0.00%)             Some(index) => Some(unsafe { self.bucket(index) }),
.             None => None,
.         }
75,392 ( 0.00%)     }
.
.     /// Gets a reference to an element in the table.
.     #[inline]
.     pub fn get(&self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option<&T> {
.         // Avoid `Option::map` because it bloats LLVM IR.
66,078 ( 0.00%)         match self.find(hash, eq) {
.             Some(bucket) => Some(unsafe { bucket.as_ref() }),
.             None => None,
.         }
.     }
.
.     /// Gets a mutable reference to an element in the table.
.     #[inline]
2,009 ( 0.00%)     pub fn get_mut(&mut self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option<&mut T> {
.         // Avoid `Option::map` because it bloats LLVM IR.
35,517 ( 0.00%)         match self.find(hash, eq) {
.             Some(bucket) => Some(unsafe { bucket.as_mut() }),
.             None => None,
.         }
2,296 ( 0.00%)     }
.
.     /// Attempts to get mutable references to `N` entries in the table at once.
.     ///
.     /// Returns an array of length `N` with the results of each query.
.     ///
.     /// At most one mutable reference will be returned to any entry. `None` will be returned if any
.     /// of the hashes are duplicates. `None` will be returned if the hash is not found.
.     ///
-- line 859 ----------------------------------------
-- line 920 ----------------------------------------
.     #[inline]
.     pub fn len(&self) -> usize {
.         self.table.items
.     }
.
.     /// Returns `true` if the table contains no elements.
.     #[inline]
.     pub fn is_empty(&self) -> bool {
2,406,373 ( 0.01%)         self.len() == 0
.     }
.
.     /// Returns the number of buckets in the table.
.     #[inline]
.     pub fn buckets(&self) -> usize {
.         self.table.bucket_mask + 1
.     }
.
-- line 936 ----------------------------------------
-- line 938 ----------------------------------------
.     /// the caller to ensure that the `RawTable` outlives the `RawIter`.
.     /// Because we cannot make the `next` method unsafe on the `RawIter`
.     /// struct, we have to make the `iter` method unsafe.
.     #[inline]
.     pub unsafe fn iter(&self) -> RawIter {
.         let data = Bucket::from_base_index(self.data_end(), 0);
.         RawIter {
.             iter: RawIterRange::new(self.table.ctrl.as_ptr(), data, self.table.buckets()),
292,975 ( 0.00%)             items: self.table.items,
.         }
.     }
.
.     /// Returns an iterator over occupied buckets that could match a given hash.
.     ///
.     /// `RawTable` only stores 7 bits of the hash value, so this iterator may
.     /// return items that have a hash value different than the one provided. You
.     /// should always validate the returned values before using them.
-- line 954 ----------------------------------------
-- line 995 ----------------------------------------
.     /// Iteration starts at the provided iterator's current location.
.     ///
.     /// It is up to the caller to ensure that the iterator is valid for this
.     /// `RawTable` and covers all items that remain in the table.
.     pub unsafe fn into_iter_from(self, iter: RawIter) -> RawIntoIter {
.         debug_assert_eq!(iter.len(), self.len());
.
.         let alloc = self.table.alloc.clone();
20,708 ( 0.00%)         let allocation = self.into_allocation();
15,531 ( 0.00%)         RawIntoIter {
25,885 ( 0.00%)             iter,
.             allocation,
.             marker: PhantomData,
.             alloc,
.         }
.     }
.
.     /// Converts the table into a raw allocation. The contents of the table
.     /// should be dropped using a `RawIter` before freeing the allocation.
.     #[cfg_attr(feature = "inline-more", inline)]
.     pub(crate) fn into_allocation(self) -> Option<(NonNull, Layout)> {
9,548 ( 0.00%)         let alloc = if self.table.is_empty_singleton() {
.             None
.         } else {
.             // Avoid `Option::unwrap_or_else` because it bloats LLVM IR.
.             let (layout, ctrl_offset) = match calculate_layout::(self.table.buckets()) {
.                 Some(lco) => lco,
.                 None => unsafe { hint::unreachable_unchecked() },
.             };
.             Some((
1,114 ( 0.00%)                 unsafe { NonNull::new_unchecked(self.table.ctrl.as_ptr().sub(ctrl_offset)) },
.                 layout,
.             ))
.         };
.         mem::forget(self);
.         alloc
.     }
. }
.
-- line 1033 ----------------------------------------
-- line 1042 ----------------------------------------
.     T: Sync,
.     A: Sync,
. {
. }
.
. impl RawTableInner {
.     #[inline]
.     const fn new_in(alloc: A) -> Self {
1,917,883 ( 0.01%)         Self {
.             // Be careful to cast the entire slice to a raw pointer.
.             ctrl: unsafe { NonNull::new_unchecked(Group::static_empty() as *const _ as *mut u8) },
.             bucket_mask: 0,
.             items: 0,
.             growth_left: 0,
.             alloc,
.         }
.     }
. }
.
. impl RawTableInner {
.     #[cfg_attr(feature = "inline-more", inline)]
1,364,900 ( 0.01%)     unsafe fn new_uninitialized(
.         alloc: A,
.         table_layout: TableLayout,
.         buckets: usize,
.         fallibility: Fallibility,
.     ) -> Result {
.         debug_assert!(buckets.is_power_of_two());
.
.         // Avoid `Option::ok_or_else` because it bloats LLVM IR.
-- line 1071 ----------------------------------------
-- line 1078 ----------------------------------------
.         // exceed `isize::MAX`. We can skip this check on 64-bit systems since
.         // such allocations will never succeed anyways.
.         //
.         // This mirrors what Vec does in the standard library.
.         if mem::size_of::() < 8 && layout.size() > isize::MAX as usize {
.             return Err(fallibility.capacity_overflow());
.         }
.
302,356 ( 0.00%)         let ptr: NonNull = match do_alloc(&alloc, layout) {
.             Ok(block) => block.cast(),
.             Err(_) => return Err(fallibility.alloc_err(layout)),
.         };
.
.         let ctrl = NonNull::new_unchecked(ptr.as_ptr().add(ctrl_offset));
748,680 ( 0.00%)         Ok(Self {
.             ctrl,
303,000 ( 0.00%)             bucket_mask: buckets - 1,
.             items: 0,
.             growth_left: bucket_mask_to_capacity(buckets - 1),
.             alloc,
.         })
993,120 ( 0.00%)     }
.
.     #[inline]
74,436 ( 0.00%)     fn fallible_with_capacity(
.         alloc: A,
.         table_layout: TableLayout,
.         capacity: usize,
.         fallibility: Fallibility,
.     ) -> Result {
19,524 ( 0.00%)         if capacity == 0 {
15,569 ( 0.00%)             Ok(Self::new_in(alloc))
.         } else {
.             unsafe {
.                 let buckets =
.                     capacity_to_buckets(capacity).ok_or_else(|| fallibility.capacity_overflow())?;
.
620,057 ( 0.00%)                 let result = Self::new_uninitialized(alloc, table_layout, buckets, fallibility)?;
.                 result.ctrl(0).write_bytes(EMPTY, result.num_ctrl_bytes());
.
38,427 ( 0.00%)                 Ok(result)
.             }
.         }
74,436 ( 0.00%)     }
.
.     /// Searches for an empty or deleted bucket which is suitable for inserting
.     /// a new element and sets the hash for that slot.
.     ///
.     /// There must be at least 1 empty bucket in the table.
.     #[inline]
327,116 ( 0.00%)     unsafe fn prepare_insert_slot(&self, hash: u64) -> (usize, u8) {
.         let index = self.find_insert_slot(hash);
327,116 ( 0.00%)         let old_ctrl = *self.ctrl(index);
.         self.set_ctrl_h2(index, hash);
.         (index, old_ctrl)
654,232 ( 0.00%)     }
.
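[Editorial sketch, not part of the report: find_insert_slot() below walks the probe sequence produced by probe_seq()/move_next() from earlier in this file. A stand-alone version of that triangular walk, showing that every group is visited exactly once on a power-of-two table; the 16-byte group width is assumed.]

use std::collections::HashSet;

const GROUP_WIDTH: usize = 16; // assumed group width

fn main() {
    let buckets = 8 * GROUP_WIDTH; // 128 buckets = 8 groups
    let bucket_mask = buckets - 1;

    let mut pos = 37 & bucket_mask; // pretend h1(hash) == 37
    let mut stride = 0;
    let mut groups_seen = HashSet::new();

    for _ in 0..buckets / GROUP_WIDTH {
        groups_seen.insert(pos / GROUP_WIDTH);
        // Triangular step: jump one group further each time.
        stride += GROUP_WIDTH;
        pos = (pos + stride) & bucket_mask;
    }

    // All 8 groups were covered with no repeats.
    assert_eq!(groups_seen.len(), buckets / GROUP_WIDTH);
}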
.     /// Searches for an empty or deleted bucket which is suitable for inserting
.     /// a new element.
.     ///
.     /// There must be at least 1 empty bucket in the table.
.     #[inline]
.     fn find_insert_slot(&self, hash: u64) -> usize {
.         let mut probe_seq = self.probe_seq(hash);
.         loop {
.             unsafe {
.                 let group = Group::load(self.ctrl(probe_seq.pos));
2,638,612 ( 0.01%)                 if let Some(bit) = group.match_empty_or_deleted().lowest_set_bit() {
7,736,260 ( 0.03%)                     let result = (probe_seq.pos + bit) & self.bucket_mask;
.
.                     // In tables smaller than the group width, trailing control
.                     // bytes outside the range of the table are filled with
.                     // EMPTY entries. These will unfortunately trigger a
.                     // match, but once masked may point to a full bucket that
.                     // is already occupied. We detect this situation here and
.                     // perform a second scan starting at the beginning of the
.                     // table. This second scan is guaranteed to find an empty
.                     // slot (due to the load factor) before hitting the trailing
.                     // control bytes (containing EMPTY).
3,550,723 ( 0.01%)                     if unlikely(is_full(*self.ctrl(result))) {
.                         debug_assert!(self.bucket_mask < Group::WIDTH);
.                         debug_assert_ne!(probe_seq.pos, 0);
.                         return Group::load_aligned(self.ctrl(0))
.                             .match_empty_or_deleted()
.                             .lowest_set_bit_nonzero();
.                     }
.
.                     return result;
-- line 1165 ----------------------------------------
-- line 1171 ----------------------------------------
.
.     /// Searches for an element in the table. This uses dynamic dispatch to reduce the amount of
.     /// code generated, but it is eliminated by LLVM optimizations.
.     #[inline]
.     fn find_inner(&self, hash: u64, eq: &mut dyn FnMut(usize) -> bool) -> Option {
.         let h2_hash = h2(hash);
.         let mut probe_seq = self.probe_seq(hash);
.
163,748 ( 0.00%)         loop {
.             let group = unsafe { Group::load(self.ctrl(probe_seq.pos)) };
.
9,295,678 ( 0.04%)             for bit in group.match_byte(h2_hash) {
18,343,232 ( 0.08%)                 let index = (probe_seq.pos + bit) & self.bucket_mask;
.
12,173,300 ( 0.05%)                 if likely(eq(index)) {
.                     return Some(index);
.                 }
.             }
.
2,766,615 ( 0.01%)             if likely(group.match_empty().any_bit_set()) {
.                 return None;
.             }
.
.             probe_seq.move_next(self.bucket_mask);
.         }
.     }
.
.     #[allow(clippy::mut_mut)]
.     #[inline]
.     unsafe fn prepare_rehash_in_place(&mut self) {
.         // Bulk convert all full control bytes to DELETED, and all DELETED
.         // control bytes to EMPTY. This effectively frees up all buckets
.         // containing a DELETED entry.
12 ( 0.00%)         for i in (0..self.buckets()).step_by(Group::WIDTH) {
.             let group = Group::load_aligned(self.ctrl(i));
.             let group = group.convert_special_to_empty_and_full_to_deleted();
.             group.store_aligned(self.ctrl(i));
.         }
.
.         // Fix up the trailing control bytes. See the comments in set_ctrl
.         // for the handling of tables smaller than the group width.
12 ( 0.00%)         if self.buckets() < Group::WIDTH {
.             self.ctrl(0)
.                 .copy_to(self.ctrl(Group::WIDTH), self.buckets());
.         } else {
.             self.ctrl(0)
.                 .copy_to(self.ctrl(self.buckets()), Group::WIDTH);
.         }
.     }
.
-- line 1220 ----------------------------------------
-- line 1225 ----------------------------------------
.         Bucket::from_base_index(self.data_end(), index)
.     }
.
.     #[inline]
.     unsafe fn bucket_ptr(&self, index: usize, size_of: usize) -> *mut u8 {
.         debug_assert_ne!(self.bucket_mask, 0);
.         debug_assert!(index < self.buckets());
.         let base: *mut u8 = self.data_end().as_ptr();
9,364,936 ( 0.04%)         base.sub((index + 1) * size_of)
.     }
.
.     #[inline]
.     unsafe fn data_end(&self) -> NonNull {
.         NonNull::new_unchecked(self.ctrl.as_ptr().cast())
.     }
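[Editorial sketch, not part of the report: a scalar model of the lookup loop in find_inner() above. The real code does the byte comparisons with SIMD group loads; here a group whose tags all fail is abandoned as soon as it contains an EMPTY byte, otherwise the triangular walk continues. All names are local to the sketch.]

const EMPTY: u8 = 0b1111_1111;
const GROUP_WIDTH: usize = 4; // shrunk so the example is easy to trace

fn find(ctrl: &[u8], h2: u8, start: usize, mut eq: impl FnMut(usize) -> bool) -> Option<usize> {
    let bucket_mask = ctrl.len() - 1; // length assumed to be a power of two
    let (mut pos, mut stride) = (start & bucket_mask, 0);
    loop {
        // 1) Try every slot in the group whose control tag matches h2.
        for i in 0..GROUP_WIDTH {
            let index = (pos + i) & bucket_mask;
            if ctrl[index] == h2 && eq(index) {
                return Some(index);
            }
        }
        // 2) An EMPTY byte anywhere in the group ends the probe sequence.
        if (0..GROUP_WIDTH).any(|i| ctrl[(pos + i) & bucket_mask] == EMPTY) {
            return None;
        }
        stride += GROUP_WIDTH;
        pos = (pos + stride) & bucket_mask;
    }
}

fn main() {
    let ctrl = [EMPTY, EMPTY, 0x21, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY];
    assert_eq!(find(&ctrl, 0x21, 0, |i| i == 2), Some(2)); // tag + eq hit
    assert_eq!(find(&ctrl, 0x33, 0, |_| true), None);      // stops at EMPTY
}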
.
.     /// Returns an iterator-like object for a probe sequence on the table.
.     ///
.     /// This iterator never terminates, but is guaranteed to visit each bucket
.     /// group exactly once. The loop using `probe_seq` must terminate upon
.     /// reaching a group containing an empty bucket.
.     #[inline]
.     fn probe_seq(&self, hash: u64) -> ProbeSeq {
.         ProbeSeq {
51,116,094 ( 0.21%)             pos: h1(hash) & self.bucket_mask,
.             stride: 0,
.         }
.     }
.
.     /// Returns the index of a bucket for which a value must be inserted if there is enough rooom
.     /// in the table, otherwise returns error
.     #[cfg(feature = "raw")]
.     #[inline]
-- line 1257 ----------------------------------------
-- line 1263 ----------------------------------------
.         } else {
.             self.record_item_insert_at(index, old_ctrl, hash);
.             Ok(index)
.         }
.     }
.
.     #[inline]
.     unsafe fn record_item_insert_at(&mut self, index: usize, old_ctrl: u8, hash: u64) {
4,864,162 ( 0.02%)         self.growth_left -= special_is_empty(old_ctrl) as usize;
.         self.set_ctrl_h2(index, hash);
3,891,296 ( 0.02%)         self.items += 1;
.     }
.
.     #[inline]
.     fn is_in_same_group(&self, i: usize, new_i: usize, hash: u64) -> bool {
.         let probe_seq_pos = self.probe_seq(hash).pos;
.         let probe_index =
.             |pos: usize| (pos.wrapping_sub(probe_seq_pos) & self.bucket_mask) / Group::WIDTH;
66 ( 0.00%)         probe_index(i) == probe_index(new_i)
.     }
.
.     /// Sets a control byte to the hash, and possibly also the replicated control byte at
.     /// the end of the array.
.     #[inline]
.     unsafe fn set_ctrl_h2(&self, index: usize, hash: u64) {
.         self.set_ctrl(index, h2(hash));
.     }
-- line 1289 ----------------------------------------
-- line 1312 ----------------------------------------
.         // replicate the buckets at the end of the trailing group. For example
.         // with 2 buckets and a group size of 4, the control bytes will look
.         // like this:
.         //
.         //     Real    |             Replicated
.         // ---------------------------------------------
.         // | [A] | [B] | [EMPTY] | [EMPTY] | [A] | [B] |
.         // ---------------------------------------------
7,429,035 ( 0.03%)         let index2 = ((index.wrapping_sub(Group::WIDTH)) & self.bucket_mask) + Group::WIDTH;
.
2,475,563 ( 0.01%)         *self.ctrl(index) = ctrl;
2,477,027 ( 0.01%)         *self.ctrl(index2) = ctrl;
.     }
.
.     /// Returns a pointer to a control byte.
.     #[inline]
.     unsafe fn ctrl(&self, index: usize) -> *mut u8 {
.         debug_assert!(index < self.num_ctrl_bytes());
.         self.ctrl.as_ptr().add(index)
.     }
.
.     #[inline]
.     fn buckets(&self) -> usize {
895,398 ( 0.00%)         self.bucket_mask + 1
.     }
.
.     #[inline]
.     fn num_ctrl_bytes(&self) -> usize {
771,113 ( 0.00%)         self.bucket_mask + 1 + Group::WIDTH
.     }
.
.     #[inline]
.     fn is_empty_singleton(&self) -> bool {
3,495,032 ( 0.01%)         self.bucket_mask == 0
.     }
.
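[Editorial sketch, not part of the report: the Real | Replicated diagram in set_ctrl() above corresponds to a one-line index calculation. A stand-alone version, with a 16-byte group width assumed.]

const GROUP_WIDTH: usize = 16; // assumed group width

// Index of the replicated control byte written by set_ctrl().
fn mirror_index(index: usize, bucket_mask: usize) -> usize {
    (index.wrapping_sub(GROUP_WIDTH) & bucket_mask) + GROUP_WIDTH
}

fn main() {
    let bucket_mask = 32 - 1; // 32 buckets
    // The first GROUP_WIDTH buckets are mirrored past the end of the array...
    assert_eq!(mirror_index(3, bucket_mask), 32 + 3);
    // ...while later buckets map onto themselves, making the second store a no-op.
    assert_eq!(mirror_index(20, bucket_mask), 20);
}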
.     #[allow(clippy::mut_mut)]
.     #[inline]
.     unsafe fn prepare_resize(
.         &self,
.         table_layout: TableLayout,
.         capacity: usize,
.         fallibility: Fallibility,
.     ) -> Result, TryReserveError> {
.         debug_assert!(self.items <= capacity);
.
.         // Allocate and initialize the new table.
6,906 ( 0.00%)         let mut new_table = RawTableInner::fallible_with_capacity(
.             self.alloc.clone(),
.             table_layout,
.             capacity,
.             fallibility,
.         )?;
404,741 ( 0.00%)         new_table.growth_left -= self.items;
.         new_table.items = self.items;
.
.         // The hash function may panic, in which case we simply free the new
.         // table without dropping any elements that may have been copied into
.         // it.
.         //
.         // This guard is also used to free the old table on success, see
.         // the comment at the bottom of this function.
.         Ok(guard(new_table, move |self_| {
173,928 ( 0.00%)             if !self_.is_empty_singleton() {
.                 self_.free_buckets(table_layout);
.             }
.         }))
.     }
.
.     /// Reserves or rehashes to make room for `additional` more elements.
.     ///
.     /// This uses dynamic dispatch to reduce the amount of
-- line 1383 ----------------------------------------
-- line 1388 ----------------------------------------
.         &mut self,
.         additional: usize,
.         hasher: &dyn Fn(&mut Self, usize) -> u64,
.         fallibility: Fallibility,
.         layout: TableLayout,
.         drop: Option,
.     ) -> Result<(), TryReserveError> {
.         // Avoid `Option::ok_or_else` because it bloats LLVM IR.
347,869 ( 0.00%)         let new_items = match self.items.checked_add(additional) {
.             Some(new_items) => new_items,
.             None => return Err(fallibility.capacity_overflow()),
.         };
347,864 ( 0.00%)         let full_capacity = bucket_mask_to_capacity(self.bucket_mask);
760,402 ( 0.00%)         if new_items <= full_capacity / 2 {
.             // Rehash in-place without re-allocating if we have plenty of spare
.             // capacity that is locked up due to DELETED entries.
.             self.rehash_in_place(hasher, layout.size, drop);
8 ( 0.00%)             Ok(())
.         } else {
.             // Otherwise, conservatively resize to at least the next size up
.             // to avoid churning deletes into frequent rehashes.
.             self.resize_inner(
173,928 ( 0.00%)                 usize::max(new_items, full_capacity + 1),
.                 hasher,
.                 fallibility,
.                 layout,
.             )
.         }
.     }
.
.     /// Allocates a new table of a different size and moves the contents of the
-- line 1418 ----------------------------------------
-- line 1424 ----------------------------------------
.     #[inline(always)]
.     unsafe fn resize_inner(
.         &mut self,
.         capacity: usize,
.         hasher: &dyn Fn(&mut Self, usize) -> u64,
.         fallibility: Fallibility,
.         layout: TableLayout,
.     ) -> Result<(), TryReserveError> {
17,546 ( 0.00%)         let mut new_table = self.prepare_resize(layout, capacity, fallibility)?;
.
.         // Copy all elements to the new table.
.         for i in 0..self.buckets() {
1,484,584 ( 0.01%)             if !is_full(*self.ctrl(i)) {
.                 continue;
.             }
.
.             // This may panic.
.             let hash = hasher(self, i);
.
.             // We can use a simpler version of insert() here since:
.             // - there are no DELETED entries.
-- line 1444 ----------------------------------------
-- line 1454 ----------------------------------------
.         }
.
.         // We successfully copied all elements without panicking. Now replace
.         // self with the new table. The old table will have its memory freed but
.         // the items will not be dropped (since they have been moved into the
.         // new table).
.         mem::swap(self, &mut new_table);
.
173,928 ( 0.00%)         Ok(())
.     }
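[Editorial sketch, not part of the report: the in-place-versus-resize decision in reserve_rehash_inner() above, reduced to its arithmetic. Names are invented for the sketch; the real code uses checked_add.]

fn plan(items: usize, additional: usize, full_capacity: usize) -> &'static str {
    let new_items = items + additional;
    if new_items <= full_capacity / 2 {
        // Plenty of capacity is locked up in DELETED entries:
        // rehash in place without allocating.
        "rehash in place"
    } else {
        // Resize to at least max(new_items, full_capacity + 1).
        "resize"
    }
}

fn main() {
    assert_eq!(plan(3, 1, 28), "rehash in place"); // mostly tombstones
    assert_eq!(plan(27, 1, 28), "resize");         // genuinely full
}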
.
.     /// Rehashes the contents of the table in place (i.e. without changing the
.     /// allocation).
.     ///
.     /// If `hasher` panics then some the table's contents may be lost.
.     ///
.     /// This uses dynamic dispatch to reduce the amount of
-- line 1470 ----------------------------------------
-- line 1496 ----------------------------------------
.             }
.             self_.growth_left = bucket_mask_to_capacity(self_.bucket_mask) - self_.items;
.         });
.
.         // At this point, DELETED elements are elements that we haven't
.         // rehashed yet. Find them and re-insert them at their ideal
.         // position.
.         'outer: for i in 0..guard.buckets() {
256 ( 0.00%)             if *guard.ctrl(i) != DELETED {
.                 continue;
.             }
.
.             let i_p = guard.bucket_ptr(i, size_of);
.
.             'inner: loop {
.                 // Hash the current item
.                 let hash = hasher(*guard, i);
-- line 1512 ----------------------------------------
-- line 1515 ----------------------------------------
.                 let new_i = guard.find_insert_slot(hash);
.                 let new_i_p = guard.bucket_ptr(new_i, size_of);
.
.                 // Probing works by scanning through all of the control
.                 // bytes in groups, which may not be aligned to the group
.                 // size. If both the new and old position fall within the
.                 // same unaligned group, then there is no benefit in moving
.                 // it and we can just continue to the next item.
22 ( 0.00%)                 if likely(guard.is_in_same_group(i, new_i, hash)) {
.                     guard.set_ctrl_h2(i, hash);
.                     continue 'outer;
.                 }
.
.                 // We are moving the current item to a new position. Write
.                 // our H2 to the control byte of the new position.
.                 let prev_ctrl = guard.replace_ctrl_h2(new_i, hash);
.                 if prev_ctrl == EMPTY {
-- line 1531 ----------------------------------------
-- line 1541 ----------------------------------------
.                     // swapped into the old slot.
.                     debug_assert_eq!(prev_ctrl, DELETED);
.                     ptr::swap_nonoverlapping(i_p, new_i_p, size_of);
.                     continue 'inner;
.                 }
.             }
.         }
.
12 ( 0.00%)         guard.growth_left = bucket_mask_to_capacity(guard.bucket_mask) - guard.items;
.
.         mem::forget(guard);
.     }
.
.     #[inline]
.     unsafe fn free_buckets(&mut self, table_layout: TableLayout) {
.         // Avoid `Option::unwrap_or_else` because it bloats LLVM IR.
.         let (layout, ctrl_offset) = match table_layout.calculate_layout_for(self.buckets()) {
.             Some(lco) => lco,
.             None => hint::unreachable_unchecked(),
.         };
.         self.alloc.deallocate(
130,351 ( 0.00%)             NonNull::new_unchecked(self.ctrl.as_ptr().sub(ctrl_offset)),
.             layout,
.         );
.     }
.
.     /// Marks all table buckets as empty without dropping their contents.
.     #[inline]
.     fn clear_no_drop(&mut self) {
27,558 ( 0.00%)         if !self.is_empty_singleton() {
.             unsafe {
.                 self.ctrl(0).write_bytes(EMPTY, self.num_ctrl_bytes());
.             }
.         }
35,770 ( 0.00%)         self.items = 0;
27,562 ( 0.00%)         self.growth_left = bucket_mask_to_capacity(self.bucket_mask);
.     }
.
.     #[inline]
.     unsafe fn erase(&mut self, index: usize) {
.         debug_assert!(is_full(*self.ctrl(index)));
267,942 ( 0.00%)         let index_before = index.wrapping_sub(Group::WIDTH) & self.bucket_mask;
.         let empty_before = Group::load(self.ctrl(index_before)).match_empty();
.         let empty_after = Group::load(self.ctrl(index)).match_empty();
.
.         // If we are inside a continuous block of Group::WIDTH full or deleted
.         // cells then a probe window may have seen a full block when trying to
.         // insert. We therefore need to keep that block non-empty so that
.         // lookups will continue searching to the next probe window.
.         //
.         // Note that in this context `leading_zeros` refers to the bytes at the
.         // end of a group, while `trailing_zeros` refers to the bytes at the
.         // beginning of a group.
1,071,768 ( 0.00%)         let ctrl = if empty_before.leading_zeros() + empty_after.trailing_zeros() >= Group::WIDTH {
.             DELETED
.         } else {
1,323,470 ( 0.01%)             self.growth_left += 1;
.             EMPTY
.         };
.         self.set_ctrl(index, ctrl);
1,071,768 ( 0.00%)         self.items -= 1;
.     }
. }
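[Editorial sketch, not part of the report: a scalar restatement of the tombstone rule in erase() above. The freed slot must stay DELETED only if a full probe window of Group::WIDTH consecutive non-EMPTY control bytes spans it, because such a window could have made an earlier insert skip ahead. The helper and the shrunken group width are invented for the sketch.]

const EMPTY: u8 = 0b1111_1111;
const DELETED: u8 = 0b1000_0000;
const GROUP_WIDTH: usize = 4; // shrunk for the example

// `full_before`: consecutive non-EMPTY slots just before the erased one;
// `full_after`: the same starting at the erased slot itself.
fn ctrl_after_erase(full_before: usize, full_after: usize) -> u8 {
    if full_before + full_after >= GROUP_WIDTH {
        DELETED // some probe window saw no EMPTY byte: keep it non-empty
    } else {
        // Every window over this slot sees an EMPTY byte anyway,
        // so the slot can be fully freed (real code: growth_left += 1).
        EMPTY
    }
}

fn main() {
    assert_eq!(ctrl_after_erase(2, 2), DELETED);
    assert_eq!(ctrl_after_erase(1, 2), EMPTY);
}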
.
. impl Clone for RawTable {
17,536 ( 0.00%)     fn clone(&self) -> Self {
2,401 ( 0.00%)         if self.table.is_empty_singleton() {
.             Self::new_in(self.table.alloc.clone())
.         } else {
.             unsafe {
.                 let mut new_table = ManuallyDrop::new(
.                     // Avoid `Result::ok_or_else` because it bloats LLVM IR.
.                     match Self::new_uninitialized(
.                         self.table.alloc.clone(),
.                         self.table.buckets(),
-- line 1615 ----------------------------------------
-- line 1624 ----------------------------------------
.                     // We need to free the memory allocated for the new table.
.                     new_table.free_buckets();
.                 });
.
.                 // Return the newly created table.
.                 ManuallyDrop::into_inner(new_table)
.             }
.         }
19,728 ( 0.00%)     }
.
.     fn clone_from(&mut self, source: &Self) {
.         if source.table.is_empty_singleton() {
.             *self = Self::new_in(self.table.alloc.clone());
.         } else {
.             unsafe {
.                 // First, drop all our elements without clearing the control bytes.
.                 self.drop_elements();
-- line 1640 ----------------------------------------
-- line 1687 ----------------------------------------
.                 .table
.                 .ctrl(0)
.                 .copy_to_nonoverlapping(self.table.ctrl(0), self.table.num_ctrl_bytes());
.             source
.                 .data_start()
.                 .copy_to_nonoverlapping(self.data_start(), self.table.buckets());
.
.             self.table.items = source.table.items;
376 ( 0.00%)             self.table.growth_left = source.table.growth_left;
.         }
.     }
.
. impl RawTable {
.     /// Common code for clone and clone_from. Assumes `self.buckets() == source.buckets()`.
.     #[cfg_attr(feature = "inline-more", inline)]
.     unsafe fn clone_from_impl(&mut self, source: &Self, mut on_panic: impl FnMut(&mut Self)) {
.         // Copy the control bytes unchanged. We do this in a single pass
-- line 1703 ----------------------------------------
-- line 1790 ----------------------------------------
.     fn default() -> Self {
.         Self::new_in(Default::default())
.     }
. }
.
. #[cfg(feature = "nightly")]
. unsafe impl<#[may_dangle] T, A: Allocator + Clone> Drop for RawTable {
.     #[cfg_attr(feature = "inline-more", inline)]
1,791,568 ( 0.01%)     fn drop(&mut self) {
1,501,311 ( 0.01%)         if !self.table.is_empty_singleton() {
.             unsafe {
.                 self.drop_elements();
.                 self.free_buckets();
.             }
.         }
1,894,232 ( 0.01%)     }
. }
. #[cfg(not(feature = "nightly"))]
. impl Drop for RawTable {
.     #[cfg_attr(feature = "inline-more", inline)]
.     fn drop(&mut self) {
.         if !self.table.is_empty_singleton() {
.             unsafe {
.                 self.drop_elements();
-- line 1813 ----------------------------------------
-- line 1817 ----------------------------------------
.     }
. }
.
. impl IntoIterator for RawTable {
.     type Item = T;
.     type IntoIter = RawIntoIter;
.
.     #[cfg_attr(feature = "inline-more", inline)]
20,708 ( 0.00%)     fn into_iter(self) -> RawIntoIter {
.         unsafe {
.             let iter = self.iter();
.             self.into_iter_from(iter)
.         }
25,885 ( 0.00%)     }
. }
.
. /// Iterator over a sub-range of a table. Unlike `RawIter` this iterator does
. /// not track an item count.
. pub(crate) struct RawIterRange {
.     // Mask of full buckets in the current group. Bits are cleared from this
.     // mask as each element is processed.
.     current_group: BitMask,
-- line 1838 ----------------------------------------
-- line 1934 ----------------------------------------
.
. impl Iterator for RawIterRange {
.     type Item = Bucket;
.
.     #[cfg_attr(feature = "inline-more", inline)]
.     fn next(&mut self) -> Option> {
.         unsafe {
.             loop {
898,497 ( 0.00%)                 if let Some(index) = self.current_group.lowest_set_bit() {
96,768 ( 0.00%)                     self.current_group = self.current_group.remove_lowest_bit();
164,593 ( 0.00%)                     return Some(self.data.next_n(index));
.                 }
.
1,117,356 ( 0.00%)                 if self.next_ctrl >= self.end {
.                     return None;
.                 }
.
.                 // We might read past self.end up to the next group boundary,
.                 // but this is fine because it only occurs on tables smaller
.                 // than the group size where the trailing control bytes are all
.                 // EMPTY. On larger tables self.end is guaranteed to be aligned
.                 // to the group size (since tables are power-of-two sized).
12,346 ( 0.00%)                 self.current_group = Group::load_aligned(self.next_ctrl).match_full();
14,346 ( 0.00%)                 self.data = self.data.next_n(Group::WIDTH);
19,642 ( 0.00%)                 self.next_ctrl = self.next_ctrl.add(Group::WIDTH);
.             }
.         }
.     }
.
.     #[inline]
.     fn size_hint(&self) -> (usize, Option) {
.         // We don't have an item count, so just guess based on the range size.
.         (
-- line 1966 ----------------------------------------
-- line 2102 ----------------------------------------
.             }
.         } else {
// We must have already iterated past the removed item. . } . } . } . . unsafe fn drop_elements(&mut self) { 4,651 ( 0.00%) if mem::needs_drop::() && self.len() != 0 { . for item in self { . item.drop(); . } . } . } . } . . impl Clone for RawIter { -- line 2118 ---------------------------------------- -- line 2124 ---------------------------------------- . } . } . } . . impl Iterator for RawIter { . type Item = Bucket; . . #[cfg_attr(feature = "inline-more", inline)] 122,583 ( 0.00%) fn next(&mut self) -> Option> { 240,798 ( 0.00%) if let Some(b) = self.iter.next() { 2,659,975 ( 0.01%) self.items -= 1; . Some(b) . } else { . // We don't check against items == 0 here to allow the . // compiler to optimize away the item count entirely if the . // iterator length is never queried. . debug_assert_eq!(self.items, 0); . None . } 245,166 ( 0.00%) } . . #[inline] . fn size_hint(&self) -> (usize, Option) { . (self.items, Some(self.items)) . } . } . . impl ExactSizeIterator for RawIter {} -- line 2151 ---------------------------------------- -- line 2177 ---------------------------------------- . T: Sync, . A: Sync, . { . } . . #[cfg(feature = "nightly")] . unsafe impl<#[may_dangle] T, A: Allocator + Clone> Drop for RawIntoIter { . #[cfg_attr(feature = "inline-more", inline)] 6,944 ( 0.00%) fn drop(&mut self) { . unsafe { . // Drop all remaining elements . self.iter.drop_elements(); . . // Free the table 44,987 ( 0.00%) if let Some((ptr, layout)) = self.allocation { . self.alloc.deallocate(ptr, layout); . } . } 1,730 ( 0.00%) } . } . #[cfg(not(feature = "nightly"))] . impl Drop for RawIntoIter { . #[cfg_attr(feature = "inline-more", inline)] . fn drop(&mut self) { . unsafe { . // Drop all remaining elements . self.iter.drop_elements(); -- line 2203 ---------------------------------------- -- line 2209 ---------------------------------------- . } . } . } . . impl Iterator for RawIntoIter { . type Item = T; . . #[cfg_attr(feature = "inline-more", inline)] 3,560 ( 0.00%) fn next(&mut self) -> Option { 2,178 ( 0.00%) unsafe { Some(self.iter.next()?.read()) } 8,413 ( 0.00%) } . . #[inline] . fn size_hint(&self) -> (usize, Option) { 4 ( 0.00%) self.iter.size_hint() . } . } . . impl ExactSizeIterator for RawIntoIter {} . impl FusedIterator for RawIntoIter {} . . /// Iterator which consumes elements without freeing the table storage. . pub struct RawDrain<'a, T, A: Allocator + Clone = Global> { -- line 2231 ---------------------------------------- -- line 2259 ---------------------------------------- . where . T: Sync, . A: Sync, . { . } . . impl Drop for RawDrain<'_, T, A> { . #[cfg_attr(feature = "inline-more", inline)] 5,120 ( 0.00%) fn drop(&mut self) { . unsafe { . // Drop all remaining elements. Note that this may panic. . self.iter.drop_elements(); . . // Reset the contents of the table now that all elements have been . // dropped. . self.table.clear_no_drop(); . . // Move the now empty table back to its original location. 640 ( 0.00%) self.orig_table . .as_ptr() . .copy_from_nonoverlapping(&*self.table, 1); . } 5,120 ( 0.00%) } . } . . impl Iterator for RawDrain<'_, T, A> { . type Item = T; . . #[cfg_attr(feature = "inline-more", inline)] . fn next(&mut self) -> Option { . 
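.         // Items are moved out of the table by value as the drain advances;
.         // anything not yielded here is dropped by RawDrain's `Drop` impl
.         // above, which then resets the table to empty.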
unsafe { -- line 2289 ---------------------------------------- 32,943,463 ( 0.14%) -------------------------------------------------------------------------------- -- Auto-annotated source: /usr/home/liquid/rust/worktree-benchmarking/compiler/rustc_trait_selection/src/traits/fulfill.rs -------------------------------------------------------------------------------- Ir -- line 33 ---------------------------------------- . . impl<'tcx> ForestObligation for PendingPredicateObligation<'tcx> { . /// Note that we include both the `ParamEnv` and the `Predicate`, . /// as the `ParamEnv` can influence whether fulfillment succeeds . /// or fails. . type CacheKey = ty::ParamEnvAnd<'tcx, ty::Predicate<'tcx>>; . . fn as_cache_key(&self) -> Self::CacheKey { 550,704 ( 0.00%) self.obligation.param_env.and(self.obligation.predicate) . } . } . . /// The fulfillment context is used to drive trait resolution. It . /// consists of a list of obligations that must be (eventually) . /// satisfied. The job is to track which are satisfied, which yielded . /// errors, and which are still pending. At any point, users can call . /// `select_where_possible`, and the fulfillment context will try to do -- line 49 ---------------------------------------- -- line 79 ---------------------------------------- . // outside of any snapshot, so any use of it inside a snapshot . // will lead to trouble and therefore is checked against, but . // other fulfillment contexts sometimes do live inside of . // a snapshot (they don't *straddle* a snapshot, so there . // is no trouble there). . usable_in_snapshot: bool, . } . 160 ( 0.00%) #[derive(Clone, Debug)] . pub struct PendingPredicateObligation<'tcx> { . pub obligation: PredicateObligation<'tcx>, . // This is far more often read than modified, meaning that we . // should mostly optimize for reading speed, while modifying is not as relevant. . // . // For whatever reason using a boxed slice is slower than using a `Vec` here. . pub stalled_on: Vec>, . } . . // `PendingPredicateObligation` is used a lot. Make sure it doesn't unintentionally get bigger. . #[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))] . static_assert_size!(PendingPredicateObligation<'_>, 72); . . impl<'a, 'tcx> FulfillmentContext<'tcx> { . /// Creates a new fulfillment context. 9,883 ( 0.00%) pub fn new() -> FulfillmentContext<'tcx> { 131,483 ( 0.00%) FulfillmentContext { 12,868 ( 0.00%) predicates: ObligationForest::new(), . relationships: FxHashMap::default(), . register_region_obligations: true, . usable_in_snapshot: false, . } 9,883 ( 0.00%) } . . pub fn new_in_snapshot() -> FulfillmentContext<'tcx> { . FulfillmentContext { . predicates: ObligationForest::new(), . relationships: FxHashMap::default(), . register_region_obligations: true, . usable_in_snapshot: true, . } . } . . pub fn new_ignoring_regions() -> FulfillmentContext<'tcx> { 29,904 ( 0.00%) FulfillmentContext { 7,476 ( 0.00%) predicates: ObligationForest::new(), . relationships: FxHashMap::default(), . register_region_obligations: false, . usable_in_snapshot: false, . } . } . . /// Attempts to select obligations using `selcx`. . fn select(&mut self, selcx: &mut SelectionContext<'a, 'tcx>) -> Vec> { 124,608 ( 0.00%) let span = debug_span!("select", obligation_forest_size = ?self.predicates.len()); . let _enter = span.enter(); . . let mut errors = Vec::new(); . . loop { . debug!("select: starting another iteration"); . . // Process pending obligations. . 
let outcome: Outcome<_, _> = 564,025 ( 0.00%) self.predicates.process_obligations(&mut FulfillProcessor { . selcx, 80,575 ( 0.00%) register_region_obligations: self.register_region_obligations, . }); . debug!("select: outcome={:#?}", outcome); . . // FIXME: if we kept the original cache key, we could mark projection . // obligations as complete for the projection cache here. . . errors.extend(outcome.errors.into_iter().map(to_fulfillment_error)); . . // If nothing new was added, no need to keep looping. 161,150 ( 0.00%) if outcome.stalled { . break; . } . } . . debug!( . "select({} predicates remaining, {} errors) done", . self.predicates.len(), . errors.len() -- line 162 ---------------------------------------- -- line 198 ---------------------------------------- . ); . self.register_predicate_obligations(infcx, obligations); . . debug!(?normalized_ty); . . normalized_ty . } . 330,798 ( 0.00%) fn register_predicate_obligation( . &mut self, . infcx: &InferCtxt<'_, 'tcx>, . obligation: PredicateObligation<'tcx>, . ) { . // this helps to reduce duplicate errors, as well as making . // debug output much nicer to read and so on. 165,399 ( 0.00%) let obligation = infcx.resolve_vars_if_possible(obligation); . . debug!(?obligation, "register_predicate_obligation"); . 330,798 ( 0.00%) assert!(!infcx.is_in_snapshot() || self.usable_in_snapshot); . 165,399 ( 0.00%) super::relationships::update(self, infcx, &obligation); . . self.predicates . .register_obligation(PendingPredicateObligation { obligation, stalled_on: vec![] }); 275,665 ( 0.00%) } . 268,464 ( 0.00%) fn select_all_or_error(&mut self, infcx: &InferCtxt<'_, 'tcx>) -> Vec> { . { 33,558 ( 0.00%) let errors = self.select_where_possible(infcx); 33,558 ( 0.00%) if !errors.is_empty() { 40 ( 0.00%) return errors; . } . } . 167,750 ( 0.00%) self.predicates.to_errors(CodeAmbiguity).into_iter().map(to_fulfillment_error).collect() 234,906 ( 0.00%) } . 623,040 ( 0.00%) fn select_where_possible( . &mut self, . infcx: &InferCtxt<'_, 'tcx>, . ) -> Vec> { . let mut selcx = SelectionContext::new(infcx); . self.select(&mut selcx) 560,736 ( 0.00%) } . 777 ( 0.00%) fn pending_obligations(&self) -> Vec> { . self.predicates.map_pending_obligations(|o| o.obligation.clone()) 1,036 ( 0.00%) } . . fn relationships(&mut self) -> &mut FxHashMap { 209 ( 0.00%) &mut self.relationships 209 ( 0.00%) } . } . . struct FulfillProcessor<'a, 'b, 'tcx> { . selcx: &'a mut SelectionContext<'b, 'tcx>, . register_region_obligations: bool, . } . . fn mk_pending(os: Vec>) -> Vec> { -- line 258 ---------------------------------------- -- line 275 ---------------------------------------- . #[inline(always)] . fn process_obligation( . &mut self, . pending_obligation: &mut Self::Obligation, . ) -> ProcessResult { . // If we were stalled on some unresolved variables, first check whether . // any of them have been resolved; if not, don't bother doing more work . // yet. 19,049,385 ( 0.08%) let change = match pending_obligation.stalled_on.len() { . // Match arms are in order of frequency, which matters because this . // code is so hot. 1 and 0 dominate; 2+ is fairly rare. . 1 => { 9,379,290 ( 0.04%) let infer_var = pending_obligation.stalled_on[0]; 3,126,430 ( 0.01%) self.selcx.infcx().ty_or_const_infer_var_changed(infer_var) . } . 0 => { . // In this case we haven't changed, but wish to make a change. . true . } . _ => { . // This `for` loop was once a call to `all()`, but this lower-level . // form was a perf win. See #64545 for details. 
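.                 // The adaptor-based equivalent would be roughly
.                 //     pending_obligation.stalled_on.iter().any(|&v| {
.                 //         self.selcx.infcx().ty_or_const_infer_var_changed(v)
.                 //     })
.                 // (shown for comparison only); the explicit loop below
.                 // compiled to better code when this was measured.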
46,695 ( 0.00%) (|| { 90,093 ( 0.00%) for &infer_var in &pending_obligation.stalled_on { 14,068 ( 0.00%) if self.selcx.infcx().ty_or_const_infer_var_changed(infer_var) { . return true; . } . } . false . })() . } . }; . 2,508 ( 0.00%) if !change { . debug!( . "process_predicate: pending obligation {:?} still stalled on {:?}", . self.selcx.infcx().resolve_vars_if_possible(pending_obligation.obligation.clone()), . pending_obligation.stalled_on . ); . return ProcessResult::Unchanged; . } . 277,119 ( 0.00%) self.progress_changed_obligations(pending_obligation) . } . . fn process_backedge<'c, I>( . &mut self, . cycle: I, . _marker: PhantomData<&'c PendingPredicateObligation<'tcx>>, . ) where . I: Clone + Iterator>, -- line 325 ---------------------------------------- -- line 333 ---------------------------------------- . } . } . . impl<'a, 'b, 'tcx> FulfillProcessor<'a, 'b, 'tcx> { . // The code calling this method is extremely hot and only rarely . // actually uses this, so move this part of the code . // out of that loop. . #[inline(never)] 923,730 ( 0.00%) fn progress_changed_obligations( . &mut self, . pending_obligation: &mut PendingPredicateObligation<'tcx>, . ) -> ProcessResult, FulfillmentErrorCode<'tcx>> { . pending_obligation.stalled_on.truncate(0); . . let obligation = &mut pending_obligation.obligation; . 277,119 ( 0.00%) if obligation.predicate.has_infer_types_or_consts() { 64,867 ( 0.00%) obligation.predicate = 194,601 ( 0.00%) self.selcx.infcx().resolve_vars_if_possible(obligation.predicate); . } . . debug!(?obligation, ?obligation.cause, "process_obligation"); . . let infcx = self.selcx.infcx(); . 55,012 ( 0.00%) let binder = obligation.predicate.kind(); 646,611 ( 0.00%) match binder.no_bound_vars() { 550 ( 0.00%) None => match binder.skip_binder() { . // Evaluation will discard candidates using the leak check. . // This means we need to pass it the bound version of our . // predicate. . ty::PredicateKind::Trait(trait_ref) => { . let trait_obligation = obligation.with(binder.rebind(trait_ref)); . 100 ( 0.00%) self.process_trait_obligation( . obligation, 300 ( 0.00%) trait_obligation, . &mut pending_obligation.stalled_on, . ) . } . ty::PredicateKind::Projection(data) => { . let project_obligation = obligation.with(binder.rebind(data)); . 100 ( 0.00%) self.process_projection_obligation( . obligation, 300 ( 0.00%) project_obligation, . &mut pending_obligation.stalled_on, . ) . } . ty::PredicateKind::RegionOutlives(_) . | ty::PredicateKind::TypeOutlives(_) . | ty::PredicateKind::WellFormed(_) . | ty::PredicateKind::ObjectSafe(_) . | ty::PredicateKind::ClosureKind(..) -- line 386 ---------------------------------------- -- line 397 ---------------------------------------- . ty::PredicateKind::TypeWellFormedFromEnv(..) => { . bug!("TypeWellFormedFromEnv is only used for Chalk") . } . }, . Some(pred) => match pred { . ty::PredicateKind::Trait(data) => { . let trait_obligation = obligation.with(Binder::dummy(data)); . 251,620 ( 0.00%) self.process_trait_obligation( . obligation, 1,107,128 ( 0.00%) trait_obligation, . &mut pending_obligation.stalled_on, . ) . } . . ty::PredicateKind::RegionOutlives(data) => { 8,118 ( 0.00%) match infcx.region_outlives_predicate(&obligation.cause, Binder::dummy(data)) { 5,412 ( 0.00%) Ok(()) => ProcessResult::Changed(vec![]), . Err(_) => ProcessResult::Error(CodeSelectionError(Unimplemented)), . } . } . . 
ty::PredicateKind::TypeOutlives(ty::OutlivesPredicate(t_a, r_b)) => { 15,478 ( 0.00%) if self.register_region_obligations { 38,695 ( 0.00%) self.selcx.infcx().register_region_obligation_with_cause( . t_a, . r_b, . &obligation.cause, . ); . } . ProcessResult::Changed(vec![]) . } . . ty::PredicateKind::Projection(ref data) => { . let project_obligation = obligation.with(Binder::dummy(*data)); . 29,990 ( 0.00%) self.process_projection_obligation( . obligation, 89,970 ( 0.00%) project_obligation, . &mut pending_obligation.stalled_on, . ) . } . . ty::PredicateKind::ObjectSafe(trait_def_id) => { . if !self.selcx.tcx().is_object_safe(trait_def_id) { . ProcessResult::Error(CodeSelectionError(Unimplemented)) . } else { . ProcessResult::Changed(vec![]) . } . } . . ty::PredicateKind::ClosureKind(_, closure_substs, kind) => { 1,656 ( 0.00%) match self.selcx.infcx().closure_kind(closure_substs) { . Some(closure_kind) => { 3,312 ( 0.00%) if closure_kind.extends(kind) { . ProcessResult::Changed(vec![]) . } else { . ProcessResult::Error(CodeSelectionError(Unimplemented)) . } . } . None => ProcessResult::Unchanged, . } . } . . ty::PredicateKind::WellFormed(arg) => { 177,576 ( 0.00%) match wf::obligations( . self.selcx.infcx(), 50,736 ( 0.00%) obligation.param_env, 50,736 ( 0.00%) obligation.cause.body_id, 76,104 ( 0.00%) obligation.recursion_depth + 1, . arg, . obligation.cause.span, . ) { . None => { 26,236 ( 0.00%) pending_obligation.stalled_on = 45,913 ( 0.00%) vec![TyOrConstInferVar::maybe_from_generic_arg(arg).unwrap()]; 13,118 ( 0.00%) ProcessResult::Unchanged . } 56,427 ( 0.00%) Some(os) => ProcessResult::Changed(mk_pending(os)), . } . } . . ty::PredicateKind::Subtype(subtype) => { 4,024 ( 0.00%) match self.selcx.infcx().subtype_predicate( . &obligation.cause, . obligation.param_env, . Binder::dummy(subtype), . ) { . None => { . // None means that both are unresolved. 2,380 ( 0.00%) pending_obligation.stalled_on = vec![ 476 ( 0.00%) TyOrConstInferVar::maybe_from_ty(subtype.a).unwrap(), 952 ( 0.00%) TyOrConstInferVar::maybe_from_ty(subtype.b).unwrap(), . ]; . ProcessResult::Unchanged . } . Some(Ok(ok)) => ProcessResult::Changed(mk_pending(ok.obligations)), . Some(Err(err)) => { . let expected_found = . ExpectedFound::new(subtype.a_is_expected, subtype.a, subtype.b); . ProcessResult::Error(FulfillmentErrorCode::CodeSubtypeError( -- line 497 ---------------------------------------- -- line 498 ---------------------------------------- . expected_found, . err, . )) . } . } . } . . ty::PredicateKind::Coerce(coerce) => { 3,576 ( 0.00%) match self.selcx.infcx().coerce_predicate( . &obligation.cause, . obligation.param_env, . Binder::dummy(coerce), . ) { . None => { . // None means that both are unresolved. 670 ( 0.00%) pending_obligation.stalled_on = vec![ 134 ( 0.00%) TyOrConstInferVar::maybe_from_ty(coerce.a).unwrap(), 268 ( 0.00%) TyOrConstInferVar::maybe_from_ty(coerce.b).unwrap(), . ]; . ProcessResult::Unchanged . } . Some(Ok(ok)) => ProcessResult::Changed(mk_pending(ok.obligations)), . Some(Err(err)) => { . let expected_found = ExpectedFound::new(false, coerce.a, coerce.b); . ProcessResult::Error(FulfillmentErrorCode::CodeSubtypeError( . expected_found, . err, . )) . } . } . } . . ty::PredicateKind::ConstEvaluatable(uv) => { 234 ( 0.00%) match const_evaluatable::is_const_evaluatable( . self.selcx.infcx(), 468 ( 0.00%) uv, 78 ( 0.00%) obligation.param_env, 39 ( 0.00%) obligation.cause.span, . ) { . Ok(()) => ProcessResult::Changed(vec![]), . Err(NotConstEvaluatable::MentionsInfer) => { . 
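.                     // The unevaluated constant still mentions inference
.                     // variables, so stall this obligation on every
.                     // inference variable appearing in its substs.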
pending_obligation.stalled_on.clear(); . pending_obligation.stalled_on.extend( . uv.substs . .iter() . .filter_map(TyOrConstInferVar::maybe_from_generic_arg), -- line 543 ---------------------------------------- -- line 639 ---------------------------------------- . } . } . } . ty::PredicateKind::TypeWellFormedFromEnv(..) => { . bug!("TypeWellFormedFromEnv is only used for Chalk") . } . }, . } 831,357 ( 0.00%) } . 956,631 ( 0.00%) #[instrument(level = "debug", skip(self, obligation, stalled_on))] . fn process_trait_obligation( . &mut self, . obligation: &PredicateObligation<'tcx>, . trait_obligation: TraitObligation<'tcx>, . stalled_on: &mut Vec>, . ) -> ProcessResult, FulfillmentErrorCode<'tcx>> { 50,349 ( 0.00%) let infcx = self.selcx.infcx(); 151,047 ( 0.00%) if obligation.predicate.is_global() { . // no type variables present, can use evaluation for better caching. . // FIXME: consider caching errors too. 44,792 ( 0.00%) if infcx.predicate_must_hold_considering_regions(obligation) { . debug!( . "selecting trait at depth {} evaluated to holds", . obligation.recursion_depth . ); 44,504 ( 0.00%) return ProcessResult::Changed(vec![]); . } . } . 196,661 ( 0.00%) match self.selcx.select(&trait_obligation) { . Ok(Some(impl_source)) => { . debug!("selecting trait at depth {} yielded Ok(Some)", obligation.recursion_depth); 204,228 ( 0.00%) ProcessResult::Changed(mk_pending(impl_source.nested_obligations())) . } . Ok(None) => { . debug!("selecting trait at depth {} yielded Ok(None)", obligation.recursion_depth); . . // This is a bit subtle: for the most part, the . // only reason we can fail to make progress on . // trait selection is because we don't have enough . // information about the types in the trait. . stalled_on.clear(); . stalled_on.extend(substs_infer_vars( . self.selcx, 16,742 ( 0.00%) trait_obligation.predicate.map_bound(|pred| pred.trait_ref.substs), . )); . . debug!( . "process_predicate: pending obligation {:?} now stalled on {:?}", . infcx.resolve_vars_if_possible(obligation.clone()), . stalled_on . ); . 16,742 ( 0.00%) ProcessResult::Unchanged . } . Err(selection_err) => { . debug!("selecting trait at depth {} yielded Err", obligation.recursion_depth); . 126 ( 0.00%) ProcessResult::Error(CodeSelectionError(selection_err)) . } . } . } . 66,253 ( 0.00%) fn process_projection_obligation( . &mut self, . obligation: &PredicateObligation<'tcx>, . project_obligation: PolyProjectionObligation<'tcx>, . stalled_on: &mut Vec>, . ) -> ProcessResult, FulfillmentErrorCode<'tcx>> { 6,023 ( 0.00%) let tcx = self.selcx.tcx(); . 18,069 ( 0.00%) if obligation.predicate.is_global() { . // no type variables present, can use evaluation for better caching. . // FIXME: consider caching errors too. 2,204 ( 0.00%) if self.selcx.infcx().predicate_must_hold_considering_regions(obligation) { 6,612 ( 0.00%) if let Some(key) = ProjectionCacheKey::from_poly_projection_predicate( . &mut self.selcx, 7,714 ( 0.00%) project_obligation.predicate, . ) { . // If `predicate_must_hold_considering_regions` succeeds, then we've . // evaluated all sub-obligations. We can therefore mark the 'root' . // obligation as complete, and skip evaluating sub-obligations. 6,612 ( 0.00%) self.selcx . .infcx() . .inner . .borrow_mut() . .projection_cache() . .complete(key, EvaluationResult::EvaluatedToOk); . } 2,204 ( 0.00%) return ProcessResult::Changed(vec![]); . } else { . tracing::debug!("Does NOT hold: {:?}", obligation); . } . } . 34,447 ( 0.00%) match project::poly_project_and_unify_type(self.selcx, &project_obligation) { . 
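.             // Four outcomes: progress with new subobligations, ambiguity
.             // (stall on the projection's inference variables), detected
.             // recursion (handled by re-queueing), or a projection error.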
Ok(Ok(Some(os))) => ProcessResult::Changed(mk_pending(os)), . Ok(Ok(None)) => { . stalled_on.clear(); . stalled_on.extend(substs_infer_vars( . self.selcx, 3,112 ( 0.00%) project_obligation.predicate.map_bound(|pred| pred.projection_ty.substs), . )); 3,112 ( 0.00%) ProcessResult::Unchanged . } . // Let the caller handle the recursion . Ok(Err(project::InProgress)) => ProcessResult::Changed(mk_pending(vec![ . project_obligation.with(project_obligation.predicate.to_predicate(tcx)), . ])), . Err(e) => ProcessResult::Error(CodeProjectionError(e)), . } 48,184 ( 0.00%) } . } . . /// Returns the set of inference variables contained in `substs`. . fn substs_infer_vars<'a, 'tcx>( . selcx: &mut SelectionContext<'a, 'tcx>, . substs: ty::Binder<'tcx, SubstsRef<'tcx>>, . ) -> impl Iterator> { . selcx . .infcx() . .resolve_vars_if_possible(substs) . .skip_binder() // ok because this check doesn't care about regions . .iter() . .filter(|arg| arg.has_infer_types_or_consts()) . .flat_map(|arg| { 107,830 ( 0.00%) let mut walker = arg.walk(); 194,744 ( 0.00%) while let Some(c) = walker.next() { 21,696 ( 0.00%) if !c.has_infer_types_or_consts() { . walker.visited.remove(&c); 6 ( 0.00%) walker.skip_current_subtree(); . } . } . walker.visited.into_iter() . }) . .filter_map(TyOrConstInferVar::maybe_from_generic_arg) . } . . fn to_fulfillment_error<'tcx>( . error: Error, FulfillmentErrorCode<'tcx>>, . ) -> FulfillmentError<'tcx> { . let mut iter = error.backtrace.into_iter(); 9 ( 0.00%) let obligation = iter.next().unwrap().obligation; . // The root obligation is the last item in the backtrace - if there's only . // one item, then it's the same as the main obligation . let root_obligation = iter.next_back().map_or_else(|| obligation.clone(), |e| e.obligation); 270 ( 0.00%) FulfillmentError::new(obligation, error.error, root_obligation) . } 13,595,188 ( 0.06%) -------------------------------------------------------------------------------- -- Auto-annotated source: /usr/home/liquid/rust/worktree-benchmarking/compiler/rustc_data_structures/src/obligation_forest/mod.rs -------------------------------------------------------------------------------- Ir -- line 121 ---------------------------------------- . #[derive(Debug)] . pub enum ProcessResult { . Unchanged, . Changed(Vec), . Error(E), . } . . #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)] 85,677 ( 0.00%) struct ObligationTreeId(usize); . . type ObligationTreeIdGenerator = . std::iter::Map, fn(usize) -> ObligationTreeId>; . . pub struct ObligationForest { . /// The list of obligations. In between calls to `process_obligations`, . /// this list only contains nodes in the `Pending` or `Waiting` state. . /// -- line 137 ---------------------------------------- -- line 181 ---------------------------------------- . has_parent: bool, . . /// Identifier of the obligation tree to which this node belongs. . obligation_tree_id: ObligationTreeId, . } . . impl Node { . fn new(parent: Option, obligation: O, obligation_tree_id: ObligationTreeId) -> Node { 1,052,880 ( 0.00%) Node { . obligation, . state: Cell::new(NodeState::Pending), 134,715 ( 0.00%) dependents: if let Some(parent_index) = parent { vec![parent_index] } else { vec![] }, . has_parent: parent.is_some(), . obligation_tree_id, . } . } . } . . /// The state of one node in some tree within the forest. This represents the . /// current state of processing for the obligation (of type `O`) associated -- line 200 ---------------------------------------- -- line 223 ---------------------------------------- . 
/// | compress() . /// v . /// (Removed) . /// ``` . /// The `Error` state can be introduced in several places, via `error_at()`. . /// . /// Outside of `ObligationForest` methods, nodes should be either `Pending` or . /// `Waiting`. 7,147,980 ( 0.03%) #[derive(Debug, Copy, Clone, PartialEq, Eq)] . enum NodeState { . /// This obligation has not yet been selected successfully. Cannot have . /// subobligations. . Pending, . . /// This obligation was selected successfully, but may or may not have . /// subobligations. . Success, -- line 239 ---------------------------------------- -- line 279 ---------------------------------------- . pub stalled: bool, . } . . impl OutcomeTrait for Outcome { . type Error = Error; . type Obligation = O; . . fn new() -> Self { 161,150 ( 0.00%) Self { stalled: true, errors: vec![] } . } . . fn mark_not_stalled(&mut self) { 65,350 ( 0.00%) self.stalled = false; . } . . fn is_stalled(&self) -> bool { 36,358 ( 0.00%) self.stalled . } . . fn record_completed(&mut self, _outcome: &Self::Obligation) { . // do nothing . } . . fn record_error(&mut self, error: Self::Error) { 144 ( 0.00%) self.errors.push(error) . } . } . . #[derive(Debug, PartialEq, Eq)] . pub struct Error { . pub error: E, . pub backtrace: Vec, . } . . impl ObligationForest { 20,337 ( 0.00%) pub fn new() -> ObligationForest { 244,044 ( 0.00%) ObligationForest { . nodes: vec![], . done_cache: Default::default(), . active_cache: Default::default(), . reused_node_vec: vec![], . obligation_tree_id_generator: (0..).map(ObligationTreeId), . error_cache: Default::default(), . } 20,337 ( 0.00%) } . . /// Returns the total number of nodes in the forest that have not . /// yet been fully resolved. . pub fn len(&self) -> usize { . self.nodes.len() . } . . /// Registers an obligation. . pub fn register_obligation(&mut self, obligation: O) { . // Ignore errors here - there is no guarantee of success. 716,729 ( 0.00%) let _ = self.register_obligation_at(obligation, None); . } . . // Returns Err(()) if we already know this obligation failed. 982,619 ( 0.00%) fn register_obligation_at(&mut self, obligation: O, parent: Option) -> Result<(), ()> { 178,658 ( 0.00%) let cache_key = obligation.as_cache_key(); 178,658 ( 0.00%) if self.done_cache.contains(&cache_key) { . debug!("register_obligation_at: ignoring already done obligation: {:?}", obligation); . return Ok(()); . } . 430,482 ( 0.00%) match self.active_cache.entry(cache_key) { . Entry::Occupied(o) => { 11,884 ( 0.00%) let node = &mut self.nodes[*o.get()]; 11,884 ( 0.00%) if let Some(parent_index) = parent { . // If the node is already in `active_cache`, it has already . // had its chance to be marked with a parent. So if it's . // not already present, just dump `parent` into the . // dependents as a non-parent. 8,847 ( 0.00%) if !node.dependents.contains(&parent_index) { . node.dependents.push(parent_index); . } . } 17,826 ( 0.00%) if let NodeState::Error = node.state.get() { Err(()) } else { Ok(()) } . } 197,415 ( 0.00%) Entry::Vacant(v) => { 394,830 ( 0.00%) let obligation_tree_id = match parent { 45,940 ( 0.00%) Some(parent_index) => self.nodes[parent_index].obligation_tree_id, . None => self.obligation_tree_id_generator.next().unwrap(), . }; . . let already_failed = parent.is_some() . && self . .error_cache . .get(&obligation_tree_id) . .map_or(false, |errors| errors.contains(v.key())); . . if already_failed { . Err(()) . } else { 65,805 ( 0.00%) let new_index = self.nodes.len(); . v.insert(new_index); . 
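.                     // Record the index in the active cache before pushing,
.                     // so later registrations of the same predicate attach
.                     // to this node as dependents instead of duplicating it.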
self.nodes.push(Node::new(parent, obligation, obligation_tree_id)); . Ok(()) . } . } . } 803,961 ( 0.00%) } . . /// Converts all remaining obligations to the given error. 234,850 ( 0.00%) pub fn to_errors(&mut self, error: E) -> Vec> { . let errors = self . .nodes . .iter() . .enumerate() . .filter(|(_index, node)| node.state.get() == NodeState::Pending) . .map(|(index, _node)| Error { error: error.clone(), backtrace: self.error_at(index) }) . .collect(); . 67,100 ( 0.00%) self.compress(|_| assert!(false)); . errors 201,300 ( 0.00%) } . . /// Returns the set of obligations that are in a pending state. . pub fn map_pending_obligations(&self, f: F) -> Vec
<P>
. where . F: Fn(&O) -> P, . { . self.nodes . .iter() . .filter(|node| node.state.get() == NodeState::Pending) . .map(|node| f(&node.obligation)) . .collect() . } . 112 ( 0.00%) fn insert_into_error_cache(&mut self, index: usize) { . let node = &self.nodes[index]; 16 ( 0.00%) self.error_cache 48 ( 0.00%) .entry(node.obligation_tree_id) . .or_default() . .insert(node.obligation.as_cache_key()); 128 ( 0.00%) } . . /// Performs a pass through the obligation list. This must . /// be called in a loop until `outcome.stalled` is false. . /// . /// This _cannot_ be unrolled (presently, at least). . #[inline(never)] 725,175 ( 0.00%) pub fn process_obligations(&mut self, processor: &mut P) -> OUT . where . P: ObligationProcessor, . OUT: OutcomeTrait>, . { . let mut outcome = OUT::new(); . . // Note that the loop body can append new nodes, and those new nodes . // will then be processed by subsequent iterations of the loop. . // . // We can't use an iterator for the loop because `self.nodes` is . // appended to and the borrow checker would complain. We also can't use . // `for index in 0..self.nodes.len() { ... }` because the range would . // be computed with the initial length, and we would miss the appended . // nodes. Therefore we use a `while` loop. . let mut index = 0; 3,319,776 ( 0.01%) while let Some(node) = self.nodes.get_mut(index) { . // `processor.process_obligation` can modify the predicate within . // `node.obligation`, and that predicate is the key used for . // `self.active_cache`. This means that `self.active_cache` can get . // out of sync with `nodes`. It's not very common, but it does . // happen, and code in `compress` has to allow for it. 6,478,402 ( 0.03%) if node.state.get() != NodeState::Pending { 31,401 ( 0.00%) index += 1; . continue; . } . 407,819 ( 0.00%) match processor.process_obligation(&mut node.obligation) { . ProcessResult::Unchanged => { . // No change in state. . } 261,364 ( 0.00%) ProcessResult::Changed(children) => { . // We are not (yet) stalled. . outcome.mark_not_stalled(); . node.state.set(NodeState::Success); . 329,756 ( 0.00%) for child in children { 581,332 ( 0.00%) let st = self.register_obligation_at(child, Some(index)); 68,392 ( 0.00%) if let Err(()) = st { . // Error already reported - propagate it . // to our node. . self.error_at(index); . } . } . } . ProcessResult::Error(err) => { . outcome.mark_not_stalled(); 297 ( 0.00%) outcome.record_error(Error { error: err, backtrace: self.error_at(index) }); . } . } 6,415,600 ( 0.03%) index += 1; . } . . // There's no need to perform marking, cycle processing and compression when nothing . // changed. 36,358 ( 0.00%) if !outcome.is_stalled() { . self.mark_successes(); . self.process_cycles(processor); 36,542 ( 0.00%) self.compress(|obl| outcome.record_completed(obl)); . } . . outcome 725,175 ( 0.00%) } . . /// Returns a vector of obligations for `p` and all of its . /// ancestors, putting them into the error state in the process. 63 ( 0.00%) fn error_at(&self, mut index: usize) -> Vec { . let mut error_stack: Vec = vec![]; . let mut trace = vec![]; . . loop { . let node = &self.nodes[index]; 16 ( 0.00%) node.state.set(NodeState::Error); . trace.push(node.obligation.clone()); 32 ( 0.00%) if node.has_parent { . // The first dependent is the parent, which is treated . // specially. . error_stack.extend(node.dependents.iter().skip(1)); 7 ( 0.00%) index = node.dependents[0]; . } else { . // No parent; treat all dependents non-specially. . error_stack.extend(node.dependents.iter()); . break; . } . } . . 
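.         // Now mark every transitive dependent collected above as an
.         // error as well; the parent chain itself was already marked
.         // during the walk.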
while let Some(index) = error_stack.pop() { -- line 508 ---------------------------------------- -- line 509 ---------------------------------------- . let node = &self.nodes[index]; . if node.state.get() != NodeState::Error { . node.state.set(NodeState::Error); . error_stack.extend(node.dependents.iter()); . } . } . . trace 72 ( 0.00%) } . . /// Mark all `Waiting` nodes as `Success`, except those that depend on a . /// pending node. . fn mark_successes(&self) { . // Convert all `Waiting` nodes to `Success`. . for node in &self.nodes { 1,495,088 ( 0.01%) if node.state.get() == NodeState::Waiting { . node.state.set(NodeState::Success); . } . } . . // Convert `Success` nodes that depend on a pending node back to . // `Waiting`. . for node in &self.nodes { 1,266,325 ( 0.01%) if node.state.get() == NodeState::Pending { . // This call site is hot. . self.inlined_mark_dependents_as_waiting(node); . } . } . } . . // This always-inlined function is for the hot call site. . #[inline(always)] . fn inlined_mark_dependents_as_waiting(&self, node: &Node) { 19,048 ( 0.00%) for &index in node.dependents.iter() { . let node = &self.nodes[index]; 19,048 ( 0.00%) let state = node.state.get(); 19,048 ( 0.00%) if state == NodeState::Success { . // This call site is cold. 35,466 ( 0.00%) self.uninlined_mark_dependents_as_waiting(node); . } else { . debug_assert!(state == NodeState::Waiting || state == NodeState::Error) . } . } . } . . // This never-inlined function is for the cold call site. . #[inline(never)] 82,754 ( 0.00%) fn uninlined_mark_dependents_as_waiting(&self, node: &Node) { . // Mark node Waiting in the cold uninlined code instead of the hot inlined . node.state.set(NodeState::Waiting); . self.inlined_mark_dependents_as_waiting(node) 94,576 ( 0.00%) } . . /// Report cycles between all `Success` nodes, and convert all `Success` . /// nodes to `Done`. This must be called after `mark_successes`. . fn process_cycles
<P>
(&mut self, processor: &mut P) . where . P: ObligationProcessor, . { 18,271 ( 0.00%) let mut stack = std::mem::take(&mut self.reused_node_vec); . for (index, node) in self.nodes.iter().enumerate() { . // For some benchmarks this state test is extremely hot. It's a win . // to handle the no-op cases immediately to avoid the cost of the . // function call. 1,266,325 ( 0.01%) if node.state.get() == NodeState::Success { 322,555 ( 0.00%) self.find_cycles_from_node(&mut stack, processor, index); . } . } . . debug_assert!(stack.is_empty()); 146,168 ( 0.00%) self.reused_node_vec = stack; . } . 811,206 ( 0.00%) fn find_cycles_from_node
<P>
(&self, stack: &mut Vec, processor: &mut P, index: usize) . where . P: ObligationProcessor, . { . let node = &self.nodes[index]; 180,268 ( 0.00%) if node.state.get() == NodeState::Success { 1,698 ( 0.00%) match stack.iter().rposition(|&n| n == index) { . None => { . stack.push(index); 25,623 ( 0.00%) for &dep_index in node.dependents.iter() { 102,492 ( 0.00%) self.find_cycles_from_node(stack, processor, dep_index); . } . stack.pop(); . node.state.set(NodeState::Done); . } . Some(rpos) => { . // Cycle detected. . processor.process_backedge( . stack[rpos..].iter().map(|&i| &self.nodes[i].obligation), . PhantomData, . ); . } . } . } 721,072 ( 0.00%) } . . /// Compresses the vector, removing all popped nodes. This adjusts the . /// indices and hence invalidates any outstanding indices. `process_cycles` . /// must be run beforehand to remove any cycles on `Success` nodes. . #[inline(never)] 381,018 ( 0.00%) fn compress(&mut self, mut outcome_cb: impl FnMut(&O)) { 51,821 ( 0.00%) let orig_nodes_len = self.nodes.len(); . let mut node_rewrites: Vec<_> = std::mem::take(&mut self.reused_node_vec); . debug_assert!(node_rewrites.is_empty()); . node_rewrites.extend(0..orig_nodes_len); . let mut dead_nodes = 0; . . // Move removable nodes to the end, preserving the order of the . // remaining nodes. . // . // LOOP INVARIANT: . // self.nodes[0..index - dead_nodes] are the first remaining nodes . // self.nodes[index - dead_nodes..index] are all dead . // self.nodes[index..] are unchanged . for index in 0..orig_nodes_len { . let node = &self.nodes[index]; 6,404,709 ( 0.03%) match node.state.get() { . NodeState::Pending | NodeState::Waiting => { 2,401,950 ( 0.01%) if dead_nodes > 0 { 843,102 ( 0.00%) self.nodes.swap(index, index - dead_nodes); 1,405,170 ( 0.01%) node_rewrites[index] -= dead_nodes; . } . } . NodeState::Done => { . // This lookup can fail because the contents of . // `self.active_cache` are not guaranteed to match those of . // `self.nodes`. See the comment in `process_obligation` . // for more details. 232,463 ( 0.00%) if let Some((predicate, _)) = 196,002 ( 0.00%) self.active_cache.remove_entry(&node.obligation.as_cache_key()) . { . self.done_cache.insert(predicate); . } else { . self.done_cache.insert(node.obligation.as_cache_key().clone()); . } . // Extract the success stories. . outcome_cb(&node.obligation); 130,668 ( 0.00%) node_rewrites[index] = orig_nodes_len; 130,668 ( 0.00%) dead_nodes += 1; . } . NodeState::Error => { . // We *intentionally* remove the node from the cache at this point. Otherwise . // tests must come up with a different type on every type error they . // check against. 48 ( 0.00%) self.active_cache.remove(&node.obligation.as_cache_key()); 32 ( 0.00%) self.insert_into_error_cache(index); 32 ( 0.00%) node_rewrites[index] = orig_nodes_len; 32 ( 0.00%) dead_nodes += 1; . } . NodeState::Success => unreachable!(), . } . } . . if dead_nodes > 0 { . // Remove the dead nodes and rewrite indices. 33,218 ( 0.00%) self.nodes.truncate(orig_nodes_len - dead_nodes); 16,609 ( 0.00%) self.apply_rewrites(&node_rewrites); . } . . node_rewrites.truncate(0); 207,284 ( 0.00%) self.reused_node_vec = node_rewrites; 414,568 ( 0.00%) } . . #[inline(never)] 149,481 ( 0.00%) fn apply_rewrites(&mut self, node_rewrites: &[usize]) { . let orig_nodes_len = node_rewrites.len(); . . for node in &mut self.nodes { . 
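.             // Drop dependents whose rewrite maps past the original length
.             // (their target node was removed) and remap the rest in place.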
let mut i = 0; 985,328 ( 0.00%) while let Some(dependent) = node.dependents.get_mut(i) { 59,120 ( 0.00%) let new_index = node_rewrites[*dependent]; 29,560 ( 0.00%) if new_index >= orig_nodes_len { . node.dependents.swap_remove(i); . if i == 0 && node.has_parent { . // We just removed the parent. . node.has_parent = false; . } . } else { 14,780 ( 0.00%) *dependent = new_index; 29,560 ( 0.00%) i += 1; . } . } . } . . // This updating of `self.active_cache` is necessary because the . // removal of nodes within `compress` can fail. See above. . self.active_cache.retain(|_predicate, index| { 3,997,684 ( 0.02%) let new_index = node_rewrites[*index]; 1,998,842 ( 0.01%) if new_index >= orig_nodes_len { . false . } else { 970,548 ( 0.00%) *index = new_index; . true . } . }); 132,872 ( 0.00%) } . } 8,256,902 ( 0.03%) -------------------------------------------------------------------------------- -- Auto-annotated source: /usr/home/liquid/rust/worktree-benchmarking/compiler/rustc_infer/src/infer/mod.rs -------------------------------------------------------------------------------- Ir -- line 108 ---------------------------------------- . suppress_errors: bool, . }, . } . . impl RegionckMode { . /// Indicates that the MIR borrowck will repeat these region . /// checks, so we should ignore errors if NLL is (unconditionally) . /// enabled. 1,220 ( 0.00%) pub fn for_item_body(tcx: TyCtxt<'_>) -> Self { . // FIXME(Centril): Once we actually remove `::Migrate` also make . // this always `true` and then proceed to eliminate the dead code. 1,220 ( 0.00%) match tcx.borrowck_mode() { . // If we're on Migrate mode, report AST region errors . BorrowckMode::Migrate => RegionckMode::Erase { suppress_errors: false }, . . // If we're on MIR, don't report AST region errors as they should be reported by NLL . BorrowckMode::Mir => RegionckMode::Erase { suppress_errors: true }, . } 2,440 ( 0.00%) } . } . . /// This type contains all the things within `InferCtxt` that sit within a . /// `RefCell` and are involved with taking/rolling back snapshots. Snapshot . /// operations are hot enough that we want only one call to `borrow_mut` per . /// call to `start_snapshot` and `rollback_to`. . pub struct InferCtxtInner<'tcx> { . /// Cache for projections. This cache is snapshotted along with the infcx. -- line 134 ---------------------------------------- -- line 202 ---------------------------------------- . /// type instantiations (`ty::Infer`) to the actual opaque . /// type (`ty::Opaque`). Used during fallback to map unconstrained . /// opaque type inference variables to their corresponding . /// opaque type. . pub opaque_types_vars: FxHashMap, Ty<'tcx>>, . } . . impl<'tcx> InferCtxtInner<'tcx> { 164,515 ( 0.00%) fn new() -> InferCtxtInner<'tcx> { 1,118,702 ( 0.00%) InferCtxtInner { . projection_cache: Default::default(), . type_variable_storage: type_variable::TypeVariableStorage::new(), . undo_log: InferCtxtUndoLogs::default(), . const_unification_storage: ut::UnificationTableStorage::new(), . int_unification_storage: ut::UnificationTableStorage::new(), . float_unification_storage: ut::UnificationTableStorage::new(), 98,709 ( 0.00%) region_constraint_storage: Some(RegionConstraintStorage::new()), . region_obligations: vec![], . opaque_types: Default::default(), . opaque_types_vars: Default::default(), . } 197,418 ( 0.00%) } . . #[inline] . pub fn region_obligations(&self) -> &[(hir::HirId, RegionObligation<'tcx>)] { . &self.region_obligations . } . . #[inline] . 
pub fn projection_cache(&mut self) -> traits::ProjectionCache<'_, 'tcx> { 21,222 ( 0.00%) self.projection_cache.with_log(&mut self.undo_log) . } . . #[inline] . fn type_variables(&mut self) -> type_variable::TypeVariableTable<'_, 'tcx> { 3,967,552 ( 0.02%) self.type_variable_storage.with_log(&mut self.undo_log) . } . . #[inline] . fn int_unification_table( . &mut self, . ) -> ut::UnificationTable< . ut::InPlace< . ty::IntVid, . &mut ut::UnificationStorage, . &mut InferCtxtUndoLogs<'tcx>, . >, . > { 121,489 ( 0.00%) self.int_unification_storage.with_log(&mut self.undo_log) . } . . #[inline] . fn float_unification_table( . &mut self, . ) -> ut::UnificationTable< . ut::InPlace< . ty::FloatVid, -- line 258 ---------------------------------------- -- line 268 ---------------------------------------- . &mut self, . ) -> ut::UnificationTable< . ut::InPlace< . ty::ConstVid<'tcx>, . &mut ut::UnificationStorage>, . &mut InferCtxtUndoLogs<'tcx>, . >, . > { 28,295 ( 0.00%) self.const_unification_storage.with_log(&mut self.undo_log) . } . . #[inline] . pub fn unwrap_region_constraints(&mut self) -> RegionConstraintCollector<'_, 'tcx> { 142,652 ( 0.00%) self.region_constraint_storage . .as_mut() . .expect("region constraints already solved") 156,256 ( 0.00%) .with_log(&mut self.undo_log) . } . } . . pub struct InferCtxt<'a, 'tcx> { . pub tcx: TyCtxt<'tcx>, . . /// The `DefId` of the item in whose context we are performing inference or typeck. . /// It is used to check whether an opaque type use is a defining use. -- line 292 ---------------------------------------- -- line 361 ---------------------------------------- . /// item we are type-checking, and just consider those names as . /// part of the root universe. So this would only get incremented . /// when we enter into a higher-ranked (`for<..>`) type or trait . /// bound. . universe: Cell, . } . . /// See the `error_reporting` module for more details. 269,952 ( 0.00%) #[derive(Clone, Copy, Debug, PartialEq, Eq, TypeFoldable)] . pub enum ValuePairs<'tcx> { . Types(ExpectedFound>), . Regions(ExpectedFound>), . Consts(ExpectedFound<&'tcx ty::Const<'tcx>>), . TraitRefs(ExpectedFound>), . PolyTraitRefs(ExpectedFound>), . } . -- line 377 ---------------------------------------- -- line 383 ---------------------------------------- . pub struct TypeTrace<'tcx> { . cause: ObligationCause<'tcx>, . values: ValuePairs<'tcx>, . } . . /// The origin of a `r1 <= r2` constraint. . /// . /// See `error_reporting` module for more details 360,010 ( 0.00%) #[derive(Clone, Debug)] . pub enum SubregionOrigin<'tcx> { . /// Arose from a subtyping relation 22,988 ( 0.00%) Subtype(Box>), . . /// When casting `&'a T` to an `&'b Trait` object, . /// relating `'a` to `'b` . RelateObjectBound(Span), . . /// Some type parameter was instantiated with the given type, . /// and that type must outlive some region. 628 ( 0.00%) RelateParamBound(Span, Ty<'tcx>, Option), . . /// The given region parameter was instantiated with a region . /// that must outlive some other region. . RelateRegionParamBound(Span), . . /// Creating a pointer `b` to contents of another reference . Reborrow(Span), . . /// Creating a pointer `b` to contents of an upvar . ReborrowUpvar(Span, ty::UpvarId), . . /// Data with type `Ty<'tcx>` was borrowed 1,117 ( 0.00%) DataBorrowed(Ty<'tcx>, Span), . . /// (&'a &'b T) where a >= b 690 ( 0.00%) ReferenceOutlivesReferent(Ty<'tcx>, Span), . . /// Comparing the signature and requirements of an impl method against . /// the containing trait. . 
CompareImplMethodObligation { span: Span, impl_item_def_id: DefId, trait_item_def_id: DefId }, . . /// Comparing the signature and requirements of an impl associated type . /// against the containing trait . CompareImplTypeObligation { span: Span, impl_item_def_id: DefId, trait_item_def_id: DefId }, -- line 426 ---------------------------------------- -- line 554 ---------------------------------------- . defining_use_anchor: Option, . } . . pub trait TyCtxtInferExt<'tcx> { . fn infer_ctxt(self) -> InferCtxtBuilder<'tcx>; . } . . impl<'tcx> TyCtxtInferExt<'tcx> for TyCtxt<'tcx> { 32,903 ( 0.00%) fn infer_ctxt(self) -> InferCtxtBuilder<'tcx> { 98,709 ( 0.00%) InferCtxtBuilder { tcx: self, defining_use_anchor: None, fresh_typeck_results: None } 32,903 ( 0.00%) } . } . . impl<'tcx> InferCtxtBuilder<'tcx> { . /// Used only by `rustc_typeck` during body type-checking/inference, . /// will initialize `in_progress_typeck_results` with fresh `TypeckResults`. . /// Will also change the scope for opaque type defining use checks to the given owner. 27,522 ( 0.00%) pub fn with_fresh_in_progress_typeck_results(mut self, table_owner: LocalDefId) -> Self { 33,638 ( 0.00%) self.fresh_typeck_results = Some(RefCell::new(ty::TypeckResults::new(table_owner))); 15,290 ( 0.00%) self.with_opaque_type_inference(table_owner) 21,406 ( 0.00%) } . . /// Whenever the `InferCtxt` should be able to handle defining uses of opaque types, . /// you need to call this function. Otherwise the opaque type will be treated opaquely. . /// . /// It is only meant to be called in two places, for typeck . /// (via `with_fresh_in_progress_typeck_results`) and for the inference context used . /// in mir borrowck. 2,698 ( 0.00%) pub fn with_opaque_type_inference(mut self, defining_use_anchor: LocalDefId) -> Self { 1,349 ( 0.00%) self.defining_use_anchor = Some(defining_use_anchor); 8,814 ( 0.00%) self 4,047 ( 0.00%) } . . /// Given a canonical value `C` as a starting point, create an . /// inference context that contains each of the bound values . /// within instantiated as a fresh variable. The `f` closure is . /// invoked with the new infcx, along with the instantiated value . /// `V` and a substitution `S`. This substitution `S` maps from . /// the bound values in `C` to their instantiated values in `V` . /// (in other words, `S(C) = V`). 27,145 ( 0.00%) pub fn enter_with_canonical( . &mut self, . span: Span, . canonical: &Canonical<'tcx, T>, . f: impl for<'a> FnOnce(InferCtxt<'a, 'tcx>, T, CanonicalVarValues<'tcx>) -> R, . ) -> R . where . T: TypeFoldable<'tcx>, . { . self.enter(|infcx| { 40,054 ( 0.00%) let (value, subst) = 1,727 ( 0.00%) infcx.instantiate_canonical_with_fresh_inference_vars(span, canonical); 59,278 ( 0.00%) f(infcx, value, subst) . }) 29,749 ( 0.00%) } . 217,604 ( 0.00%) pub fn enter(&mut self, f: impl for<'a> FnOnce(InferCtxt<'a, 'tcx>) -> R) -> R { 86,170 ( 0.00%) let InferCtxtBuilder { tcx, defining_use_anchor, ref fresh_typeck_results } = *self; . let in_progress_typeck_results = fresh_typeck_results.as_ref(); 1,437,776 ( 0.01%) f(InferCtxt { . tcx, . defining_use_anchor, . in_progress_typeck_results, 32,903 ( 0.00%) inner: RefCell::new(InferCtxtInner::new()), . lexical_region_resolutions: RefCell::new(None), . selection_cache: Default::default(), . evaluation_cache: Default::default(), . reported_trait_errors: Default::default(), . reported_closure_mismatch: Default::default(), . tainted_by_errors_flag: Cell::new(false), 32,903 ( 0.00%) err_count_on_creation: tcx.sess.err_count(), . 
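.             // A fresh inference context starts outside of any snapshot.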
in_snapshot: Cell::new(false), . skip_leak_check: Cell::new(false), . universe: Cell::new(ty::UniverseIndex::ROOT), . }) 241,582 ( 0.00%) } . } . . impl<'tcx, T> InferOk<'tcx, T> { . pub fn unit(self) -> InferOk<'tcx, ()> { . InferOk { value: (), obligations: self.obligations } . } . . /// Extracts `value`, registering any obligations into `fulfill_cx`. . pub fn into_value_registering_obligations( . self, . infcx: &InferCtxt<'_, 'tcx>, . fulfill_cx: &mut dyn TraitEngine<'tcx>, . ) -> T { 751 ( 0.00%) let InferOk { value, obligations } = self; 2,834 ( 0.00%) for obligation in obligations { . fulfill_cx.register_predicate_obligation(infcx, obligation); . } . value . } . } . . impl<'tcx> InferOk<'tcx, ()> { 20,979 ( 0.00%) pub fn into_obligations(self) -> PredicateObligations<'tcx> { 83,916 ( 0.00%) self.obligations 20,979 ( 0.00%) } . } . . #[must_use = "once you start a snapshot, you should always consume it"] . pub struct CombinedSnapshot<'a, 'tcx> { . undo_snapshot: Snapshot<'tcx>, . region_constraints_snapshot: RegionSnapshot, . universe: ty::UniverseIndex, . was_in_snapshot: bool, -- line 662 ---------------------------------------- -- line 674 ---------------------------------------- . let canonical = self.canonicalize_query((a, b), &mut OriginalQueryValues::default()); . debug!("canonical consts: {:?}", &canonical.value); . . self.tcx.try_unify_abstract_consts(canonical.value) . } . . pub fn is_in_snapshot(&self) -> bool { . self.in_snapshot.get() 55,133 ( 0.00%) } . 400,344 ( 0.00%) pub fn freshen>(&self, t: T) -> T { 450,387 ( 0.00%) t.fold_with(&mut self.freshener()) 450,387 ( 0.00%) } . . /// Returns the origin of the type variable identified by `vid`, or `None` . /// if this is not a type variable. . /// . /// No attempt is made to resolve `ty`. 2,508 ( 0.00%) pub fn type_var_origin(&'a self, ty: Ty<'tcx>) -> Option { 5,016 ( 0.00%) match *ty.kind() { 1,209 ( 0.00%) ty::Infer(ty::TyVar(vid)) => { 4,836 ( 0.00%) Some(*self.inner.borrow_mut().type_variables().var_origin(vid)) . } 45 ( 0.00%) _ => None, . } 5,016 ( 0.00%) } . 50,043 ( 0.00%) pub fn freshener<'b>(&'b self) -> TypeFreshener<'b, 'tcx> { . freshen::TypeFreshener::new(self, false) 50,043 ( 0.00%) } . . /// Like `freshener`, but does not replace `'static` regions. 165,986 ( 0.00%) pub fn freshener_keep_static<'b>(&'b self) -> TypeFreshener<'b, 'tcx> { . freshen::TypeFreshener::new(self, true) 165,986 ( 0.00%) } . 5,716 ( 0.00%) pub fn unsolved_variables(&self) -> Vec> { 2,858 ( 0.00%) let mut inner = self.inner.borrow_mut(); 2,858 ( 0.00%) let mut vars: Vec> = inner . .type_variables() . .unsolved_variables() . .into_iter() 2,418 ( 0.00%) .map(|t| self.tcx.mk_ty_var(t)) . .collect(); . vars.extend( . (0..inner.int_unification_table().len()) . .map(|i| ty::IntVid { index: i as u32 }) 4,156 ( 0.00%) .filter(|&vid| inner.int_unification_table().probe_value(vid).is_none()) 45 ( 0.00%) .map(|v| self.tcx.mk_int_var(v)), . ); . vars.extend( . (0..inner.float_unification_table().len()) . .map(|i| ty::FloatVid { index: i as u32 }) . .filter(|&vid| inner.float_unification_table().probe_value(vid).is_none()) . .map(|v| self.tcx.mk_float_var(v)), . ); . vars 10,003 ( 0.00%) } . 99,170 ( 0.00%) fn combine_fields( . &'a self, . trace: TypeTrace<'tcx>, . param_env: ty::ParamEnv<'tcx>, . ) -> CombineFields<'a, 'tcx> { 424,244 ( 0.00%) CombineFields { . infcx: self, 1,060,610 ( 0.00%) trace, . cause: None, . param_env, . obligations: PredicateObligations::new(), . } 99,170 ( 0.00%) } . . 
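.     // The snapshot methods below all follow the same undo-log pattern. As
.     // a minimal sketch of that pattern (simplified types, not the actual
.     // rustc/ena API):
.     //
.     //     struct UndoLog<U> { log: Vec<U> }  // each U reverses one mutation
.     //     struct Snapshot { len: usize }
.     //
.     //     impl<U> UndoLog<U> {
.     //         fn start_snapshot(&self) -> Snapshot {
.     //             Snapshot { len: self.log.len() }
.     //         }
.     //         fn rollback_to(&mut self, s: Snapshot, mut undo: impl FnMut(U)) {
.     //             // Reverse every mutation recorded since the snapshot,
.     //             // newest first.
.     //             while self.log.len() > s.len {
.     //                 undo(self.log.pop().unwrap());
.     //             }
.     //         }
.     //         fn commit(&mut self, _s: Snapshot) {
.     //             // Keep the entries; only the marker is discarded.
.     //         }
.     //     }
.     //
.     // `commit_if_ok` composes these: start a snapshot, run the closure,
.     // then commit on `Ok` and roll back on `Err`.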
/// Clear the "currently in a snapshot" flag, invoke the closure, . /// then restore the flag to its original value. This flag is a . /// debugging measure designed to detect cases where we start a . /// snapshot, create type variables, and register obligations . /// which may involve those type variables in the fulfillment cx, . /// potentially leaving "dangling type variables" behind. . /// In such cases, an assertion will fail when attempting to -- line 753 ---------------------------------------- -- line 755 ---------------------------------------- . /// better than grovelling through megabytes of `RUSTC_LOG` output. . /// . /// HOWEVER, in some cases the flag is unhelpful. In particular, we . /// sometimes create a "mini-fulfilment-cx" in which we enroll . /// obligations. As long as this fulfillment cx is fully drained . /// before we return, this is not a problem, as there won't be any . /// escaping obligations in the main cx. In those cases, you can . /// use this function. 112 ( 0.00%) pub fn save_and_restore_in_snapshot_flag(&self, func: F) -> R . where . F: FnOnce(&Self) -> R, . { . let flag = self.in_snapshot.replace(false); 14,844 ( 0.00%) let result = func(self); . self.in_snapshot.set(flag); . result 126 ( 0.00%) } . 595,088 ( 0.00%) fn start_snapshot(&self) -> CombinedSnapshot<'a, 'tcx> { . debug!("start_snapshot()"); . . let in_snapshot = self.in_snapshot.replace(true); . . let mut inner = self.inner.borrow_mut(); . 1,785,264 ( 0.01%) CombinedSnapshot { . undo_snapshot: inner.undo_log.start_snapshot(), . region_constraints_snapshot: inner.unwrap_region_constraints().start_snapshot(), . universe: self.universe(), . was_in_snapshot: in_snapshot, . // Borrow typeck results "in progress" (i.e., during typeck) . // to ban writes from within a snapshot to them. 297,544 ( 0.00%) _in_progress_typeck_results: self . .in_progress_typeck_results . .map(|typeck_results| typeck_results.borrow()), . } 1,190,176 ( 0.00%) } . 1,455,146 ( 0.01%) #[instrument(skip(self, snapshot), level = "debug")] . fn rollback_to(&self, cause: &str, snapshot: CombinedSnapshot<'a, 'tcx>) { . let CombinedSnapshot { 132,286 ( 0.00%) undo_snapshot, 132,286 ( 0.00%) region_constraints_snapshot, 132,286 ( 0.00%) universe, 132,286 ( 0.00%) was_in_snapshot, 264,572 ( 0.00%) _in_progress_typeck_results, . } = snapshot; . . self.in_snapshot.set(was_in_snapshot); . self.universe.set(universe); . . let mut inner = self.inner.borrow_mut(); 132,286 ( 0.00%) inner.rollback_to(undo_snapshot); . inner.unwrap_region_constraints().rollback_to(region_constraints_snapshot); . } . 2,478,870 ( 0.01%) #[instrument(skip(self, snapshot), level = "debug")] . fn commit_from(&self, snapshot: CombinedSnapshot<'a, 'tcx>) { . let CombinedSnapshot { 165,258 ( 0.00%) undo_snapshot, . region_constraints_snapshot: _, . universe: _, 165,258 ( 0.00%) was_in_snapshot, 330,516 ( 0.00%) _in_progress_typeck_results, . } = snapshot; . . self.in_snapshot.set(was_in_snapshot); . . self.inner.borrow_mut().commit(undo_snapshot); . } . . /// Executes `f` and commit the bindings. 137,272 ( 0.00%) #[instrument(skip(self, f), level = "debug")] 168,289 ( 0.00%) pub fn commit_unconditionally(&self, f: F) -> R . where . F: FnOnce(&CombinedSnapshot<'a, 'tcx>) -> R, . { 15,299 ( 0.00%) let snapshot = self.start_snapshot(); 48,475 ( 0.00%) let r = f(&snapshot); 107,093 ( 0.00%) self.commit_from(snapshot); 90,897 ( 0.00%) r . } . . /// Execute `f` and commit the bindings if closure `f` returns `Ok(_)`. 
1,182,089 ( 0.00%) #[instrument(skip(self, f), level = "debug")] 1,461,513 ( 0.01%) pub fn commit_if_ok(&self, f: F) -> Result . where . F: FnOnce(&CombinedSnapshot<'a, 'tcx>) -> Result, . { 222,591 ( 0.00%) let snapshot = self.start_snapshot(); 679,140 ( 0.00%) let r = f(&snapshot); . debug!("commit_if_ok() -- r.is_ok() = {}", r.is_ok()); 232,721 ( 0.00%) match r { . Ok(_) => { 1,068,713 ( 0.00%) self.commit_from(snapshot); . } . Err(_) => { 465,573 ( 0.00%) self.rollback_to("commit_if_ok -- error", snapshot); . } . } 1,249,117 ( 0.01%) r . } . . /// Execute `f` then unroll any bindings it creates. 491,573 ( 0.00%) #[instrument(skip(self, f), level = "debug")] 621,775 ( 0.00%) pub fn probe(&self, f: F) -> R . where . F: FnOnce(&CombinedSnapshot<'a, 'tcx>) -> R, . { 151,905 ( 0.00%) let snapshot = self.start_snapshot(); 283,487 ( 0.00%) let r = f(&snapshot); 699,529 ( 0.00%) self.rollback_to("probe", snapshot); 54,109 ( 0.00%) r . } . . /// If `should_skip` is true, then execute `f` then unroll any bindings it creates. 58,959 ( 0.00%) #[instrument(skip(self, f), level = "debug")] 78,612 ( 0.00%) pub fn probe_maybe_skip_leak_check(&self, should_skip: bool, f: F) -> R . where . F: FnOnce(&CombinedSnapshot<'a, 'tcx>) -> R, . { 13,102 ( 0.00%) let snapshot = self.start_snapshot(); 6,551 ( 0.00%) let was_skip_leak_check = self.skip_leak_check.get(); 13,102 ( 0.00%) if should_skip { . self.skip_leak_check.set(true); . } 26,204 ( 0.00%) let r = f(&snapshot); 58,959 ( 0.00%) self.rollback_to("probe", snapshot); . self.skip_leak_check.set(was_skip_leak_check); 78,612 ( 0.00%) r . } . . /// Scan the constraints produced since `snapshot` began and returns: . /// . /// - `None` -- if none of them involve "region outlives" constraints . /// - `Some(true)` -- if there are `'a: 'b` constraints where `'a` or `'b` is a placeholder . /// - `Some(false)` -- if there are `'a: 'b` constraints but none involve placeholders 15,906 ( 0.00%) pub fn region_constraints_added_in_snapshot( . &self, . snapshot: &CombinedSnapshot<'a, 'tcx>, . ) -> Option { 31,812 ( 0.00%) self.inner . .borrow_mut() . .unwrap_region_constraints() . .region_constraints_added_in_snapshot(&snapshot.undo_snapshot) 23,859 ( 0.00%) } . . pub fn add_given(&self, sub: ty::Region<'tcx>, sup: ty::RegionVid) { . self.inner.borrow_mut().unwrap_region_constraints().add_given(sub, sup); . } . 2,184 ( 0.00%) pub fn can_sub(&self, param_env: ty::ParamEnv<'tcx>, a: T, b: T) -> UnitResult<'tcx> . where . T: at::ToTrace<'tcx>, . { . let origin = &ObligationCause::dummy(); . self.probe(|_| { . self.at(origin, param_env).sub(a, b).map(|InferOk { obligations: _, .. }| { . // Ignore obligations, since we are unrolling . // everything anyway. . }) . }) 1,638 ( 0.00%) } . 10,392 ( 0.00%) pub fn can_eq(&self, param_env: ty::ParamEnv<'tcx>, a: T, b: T) -> UnitResult<'tcx> . where . T: at::ToTrace<'tcx>, . { . let origin = &ObligationCause::dummy(); . self.probe(|_| { . self.at(origin, param_env).eq(a, b).map(|InferOk { obligations: _, .. }| { . // Ignore obligations, since we are unrolling . // everything anyway. . }) . }) 7,794 ( 0.00%) } . 53,860 ( 0.00%) #[instrument(skip(self), level = "debug")] . pub fn sub_regions( . &self, . origin: SubregionOrigin<'tcx>, . a: ty::Region<'tcx>, . b: ty::Region<'tcx>, . ) { 48,474 ( 0.00%) self.inner.borrow_mut().unwrap_region_constraints().make_subregion(origin, a, b); . } . . /// Require that the region `r` be equal to one of the regions in . /// the set `regions`. . #[instrument(skip(self), level = "debug")] . 
        .
        .  /// Require that the region `r` be equal to one of the regions in
        .  /// the set `regions`.
        .  #[instrument(skip(self), level = "debug")]
        .  pub fn member_constraint(
        .      &self,
        .      opaque_type_def_id: DefId,
-- line 947 ----------------------------------------
-- line 969 ----------------------------------------
        .  /// to `subtype_predicate` -- that is, "coercing" `a` to `b` winds up
        .  /// actually requiring `a <: b`. This is of course a valid coercion,
        .  /// but it's not as flexible as `FnCtxt::coerce` would be.
        .  ///
        .  /// (We may refactor this in the future, but there are a number of
        .  /// practical obstacles. Among other things, `FnCtxt::coerce` presently
        .  /// records adjustments that are required on the HIR in order to perform
        .  /// the coercion, and we don't currently have a way to manage that.)
    1,341 ( 0.00%)  pub fn coerce_predicate(
        .      &self,
        .      cause: &ObligationCause<'tcx>,
        .      param_env: ty::ParamEnv<'tcx>,
        .      predicate: ty::PolyCoercePredicate<'tcx>,
        .  ) -> Option<InferResult<'tcx, ()>> {
      894 ( 0.00%)      let subtype_predicate = predicate.map_bound(|p| ty::SubtypePredicate {
        .          a_is_expected: false, // when coercing from `a` to `b`, `b` is expected
        .          a: p.a,
        .          b: p.b,
        .      });
    2,235 ( 0.00%)      self.subtype_predicate(cause, param_env, subtype_predicate)
    1,788 ( 0.00%)  }
        .
   11,412 ( 0.00%)  pub fn subtype_predicate(
        .      &self,
        .      cause: &ObligationCause<'tcx>,
        .      param_env: ty::ParamEnv<'tcx>,
        .      predicate: ty::PolySubtypePredicate<'tcx>,
        .  ) -> Option<InferResult<'tcx, ()>> {
        .      // Check for two unresolved inference variables, in which case we can
        .      // make no progress. This is partly a micro-optimization, but it's
        .      // also an opportunity to "sub-unify" the variables. This isn't
-- line 999 ----------------------------------------
-- line 1002 ----------------------------------------
        .      // earlier that they are sub-unified).
        .      //
        .      // Note that we can just skip the binders here because
        .      // type variables can't (at present, at
        .      // least) capture any of the things bound by this binder.
        .      //
        .      // Note that this sub here is not just for diagnostics - it has semantic
        .      // effects as well.
      951 ( 0.00%)      let r_a = self.shallow_resolve(predicate.skip_binder().a);
      951 ( 0.00%)      let r_b = self.shallow_resolve(predicate.skip_binder().b);
    5,956 ( 0.00%)      match (r_a.kind(), r_b.kind()) {
    1,222 ( 0.00%)          (&ty::Infer(ty::TyVar(a_vid)), &ty::Infer(ty::TyVar(b_vid))) => {
        .              self.inner.borrow_mut().type_variables().sub(a_vid, b_vid);
    1,222 ( 0.00%)              return None;
        .          }
        .          _ => {}
        .      }
        .
        .      Some(self.commit_if_ok(|_snapshot| {
      340 ( 0.00%)          let ty::SubtypePredicate { a_is_expected, a, b } =
        .              self.replace_bound_vars_with_placeholders(predicate);
        .
      680 ( 0.00%)          let ok = self.at(cause, param_env).sub_exp(a_is_expected, a, b)?;
        .
        .          Ok(ok.unit())
        .      }))
    8,559 ( 0.00%)  }
        .
   16,236 ( 0.00%)  pub fn region_outlives_predicate(
        .      &self,
        .      cause: &traits::ObligationCause<'tcx>,
        .      predicate: ty::PolyRegionOutlivesPredicate<'tcx>,
        .  ) -> UnitResult<'tcx> {
        .      self.commit_if_ok(|_snapshot| {
        .          let ty::OutlivesPredicate(r_a, r_b) =
        .              self.replace_bound_vars_with_placeholders(predicate);
        .          let origin = SubregionOrigin::from_obligation_cause(cause, || {
        .              RelateRegionParamBound(cause.span)
        .          });
   13,530 ( 0.00%)          self.sub_regions(origin, r_b, r_a); // `b : a` ==> `a <= b`
        .          Ok(())
        .      })
   10,824 ( 0.00%)  }
        .
        .  /// Number of type variables created so far.
      209 ( 0.00%)  pub fn num_ty_vars(&self) -> usize {
        .      self.inner.borrow_mut().type_variables().num_vars()
      418 ( 0.00%)  }
        .
   57,156 ( 0.00%)  pub fn next_ty_var_id(&self, origin: TypeVariableOrigin) -> TyVid {
  285,780 ( 0.00%)  self.inner.borrow_mut().type_variables().new_var(self.universe(), origin)
   85,734 ( 0.00%)  }
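Note the fast path at the top of `subtype_predicate` above: when both sides are still unresolved inference variables, it records them as sub-unified and returns `None` (no progress) rather than paying for a snapshot and a full subtype check. A hedged sketch of that shape, with toy types in place of the `ty::TyKind` tree and the unification table (the names `Ty` and `SubRelations` are invented here):

    #[derive(Clone, Copy, PartialEq)]
    enum Ty {
        Int,
        Var(usize), // an unresolved inference variable
    }

    struct SubRelations {
        pairs: Vec<(usize, usize)>, // recorded `a <: b` between variables
    }

    /// Returns `None` when no progress can be made (two unresolved vars),
    /// otherwise `Some(result-of-the-real-subtype-check)`.
    fn subtype_fast_path(rels: &mut SubRelations, a: Ty, b: Ty) -> Option<bool> {
        match (a, b) {
            (Ty::Var(av), Ty::Var(bv)) => {
                // Like `type_variables().sub(a_vid, b_vid)`: remember the
                // relation so it is enforced once either side is resolved.
                rels.pairs.push((av, bv));
                None
            }
            // Anything else would go through the snapshot + `sub_exp` path;
            // equality stands in for the real check in this toy.
            _ => Some(a == b),
        }
    }

    fn main() {
        let mut rels = SubRelations { pairs: Vec::new() };
        assert_eq!(subtype_fast_path(&mut rels, Ty::Var(0), Ty::Var(1)), None);
        assert_eq!(rels.pairs, vec![(0, 1)]);
        assert_eq!(subtype_fast_path(&mut rels, Ty::Int, Ty::Int), Some(true));
    }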
        .
   51,138 ( 0.00%)  pub fn next_ty_var(&self, origin: TypeVariableOrigin) -> Ty<'tcx> {
  197,037 ( 0.00%)      self.tcx.mk_ty_var(self.next_ty_var_id(origin))
   76,707 ( 0.00%)  }
        .
    1,808 ( 0.00%)  pub fn next_ty_var_in_universe(
        .      &self,
        .      origin: TypeVariableOrigin,
        .      universe: ty::UniverseIndex,
        .  ) -> Ty<'tcx> {
    9,944 ( 0.00%)      let vid = self.inner.borrow_mut().type_variables().new_var(universe, origin);
      904 ( 0.00%)      self.tcx.mk_ty_var(vid)
    2,712 ( 0.00%)  }
        .
        .  pub fn next_const_var(
        .      &self,
        .      ty: Ty<'tcx>,
        .      origin: ConstVariableOrigin,
        .  ) -> &'tcx ty::Const<'tcx> {
        .      self.tcx.mk_const_var(self.next_const_var_id(origin), ty)
        .  }
-- line 1074 ----------------------------------------
-- line 1090 ----------------------------------------
        .  pub fn next_const_var_id(&self, origin: ConstVariableOrigin) -> ConstVid<'tcx> {
        .      self.inner.borrow_mut().const_unification_table().new_key(ConstVarValue {
        .          origin,
        .          val: ConstVariableValue::Unknown { universe: self.universe() },
        .      })
        .  }
        .
        .  fn next_int_var_id(&self) -> IntVid {
    6,815 ( 0.00%)      self.inner.borrow_mut().int_unification_table().new_key(None)
        .  }
        .
    4,089 ( 0.00%)  pub fn next_int_var(&self) -> Ty<'tcx> {
        .      self.tcx.mk_int_var(self.next_int_var_id())
    5,452 ( 0.00%)  }
        .
        .  fn next_float_var_id(&self) -> FloatVid {
        .      self.inner.borrow_mut().float_unification_table().new_key(None)
        .  }
        .
        .  pub fn next_float_var(&self) -> Ty<'tcx> {
        .      self.tcx.mk_float_var(self.next_float_var_id())
        .  }
        .
        .  /// Creates a fresh region variable with the next available index.
        .  /// The variable will be created in the maximum universe created
        .  /// thus far, allowing it to name any region created thus far.
    9,541 ( 0.00%)  pub fn next_region_var(&self, origin: RegionVariableOrigin) -> ty::Region<'tcx> {
  424,870 ( 0.00%)      self.next_region_var_in_universe(origin, self.universe())
   19,082 ( 0.00%)  }
        .
        .  /// Creates a fresh region variable with the next available index
        .  /// in the given universe; typically, you can use
        .  /// `next_region_var` and just use the maximal universe.
  164,744 ( 0.00%)  pub fn next_region_var_in_universe(
        .      &self,
        .      origin: RegionVariableOrigin,
        .      universe: ty::UniverseIndex,
        .  ) -> ty::Region<'tcx> {
        .      let region_var =
1,070,836 ( 0.00%)          self.inner.borrow_mut().unwrap_region_constraints().new_region_var(universe, origin);
  411,860 ( 0.00%)      self.tcx.mk_region(ty::ReVar(region_var))
  247,116 ( 0.00%)  }
        .
        .  /// Returns the universe that the region `r` was created in. For
        .  /// most regions (e.g., `'static`, named regions from the user,
        .  /// etc) this is the root universe U0. For inference variables or
        .  /// placeholders, however, it will return the universe with which
        .  /// they are associated.
    8,076 ( 0.00%)  pub fn universe_of_region(&self, r: ty::Region<'tcx>) -> ty::UniverseIndex {
        .      self.inner.borrow_mut().unwrap_region_constraints().universe(r)
   12,114 ( 0.00%)  }
        .
        .  /// Number of region variables created so far.
   11,050 ( 0.00%)  pub fn num_region_vars(&self) -> usize {
        .      self.inner.borrow_mut().unwrap_region_constraints().num_region_vars()
   16,575 ( 0.00%)  }
        .
        .  /// Just a convenient wrapper of `next_region_var` for use during NLL.
   22,884 ( 0.00%)  pub fn next_nll_region_var(&self, origin: NllRegionVariableOrigin) -> ty::Region<'tcx> {
        .      self.next_region_var(RegionVariableOrigin::Nll(origin))
   45,768 ( 0.00%)  }
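All of the `next_*_var` constructors above boil down to "hand out the next index, tagged with the universe it was created in". A minimal sketch of that bookkeeping, with invented names (`VarSupply` and friends are not rustc types, though `UniverseIndex`/`TyVid` mimic the real ones):

    // Toy model of fresh-variable creation tagged with a universe index.
    #[derive(Clone, Copy, PartialEq, Debug)]
    struct UniverseIndex(u32);

    #[derive(Clone, Copy, Debug)]
    struct TyVid(u32);

    struct VarSupply {
        next: u32,
        universes: Vec<UniverseIndex>, // universe each variable was created in
        current_universe: UniverseIndex,
    }

    impl VarSupply {
        /// Like `next_ty_var`: a fresh variable in the *current* universe.
        fn next_ty_var(&mut self) -> TyVid {
            self.next_ty_var_in_universe(self.current_universe)
        }

        /// Like `next_ty_var_in_universe`: the caller picks the universe.
        fn next_ty_var_in_universe(&mut self, universe: UniverseIndex) -> TyVid {
            let vid = TyVid(self.next);
            self.next += 1;
            self.universes.push(universe);
            vid
        }

        /// Like `create_next_universe`: extend the universe chain by one.
        fn create_next_universe(&mut self) -> UniverseIndex {
            self.current_universe = UniverseIndex(self.current_universe.0 + 1);
            self.current_universe
        }
    }

    fn main() {
        let mut supply =
            VarSupply { next: 0, universes: Vec::new(), current_universe: UniverseIndex(0) };
        let v0 = supply.next_ty_var();
        let u1 = supply.create_next_universe();
        let v1 = supply.next_ty_var();
        assert_eq!(supply.universes[v0.0 as usize], UniverseIndex(0));
        assert_eq!(supply.universes[v1.0 as usize], u1);
    }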
        .
        .  /// Just a convenient wrapper of `next_region_var` for use during NLL.
      273 ( 0.00%)  pub fn next_nll_region_var_in_universe(
        .      &self,
        .      origin: NllRegionVariableOrigin,
        .      universe: ty::UniverseIndex,
        .  ) -> ty::Region<'tcx> {
    2,015 ( 0.00%)      self.next_region_var_in_universe(RegionVariableOrigin::Nll(origin), universe)
      546 ( 0.00%)  }
        .
  611,376 ( 0.00%)  pub fn var_for_def(&self, span: Span, param: &ty::GenericParamDef) -> GenericArg<'tcx> {
  294,720 ( 0.00%)      match param.kind {
        .          GenericParamDefKind::Lifetime => {
        .              // Create a region inference variable for the given
        .              // region parameter definition.
   43,695 ( 0.00%)              self.next_region_var(EarlyBoundRegion(span, param.name)).into()
        .          }
        .          GenericParamDefKind::Type { .. } => {
        .              // Create a type inference variable for the given
        .              // type parameter definition. The substitutions are
        .              // for actual parameters that may be referred to by
        .              // the default of this type parameter, if it exists.
        .              // e.g., `struct Foo<A, B, C = (A, B)>(...);` when
        .              // used in a path such as `Foo::<T, U>::new()` will
        .              // use an inference variable for `C` with `[T, U]`
        .              // as the substitutions for the default, `(T, U)`.
  118,228 ( 0.00%)              let ty_var_id = self.inner.borrow_mut().type_variables().new_var(
        .                  self.universe(),
  147,785 ( 0.00%)                  TypeVariableOrigin {
        .                      kind: TypeVariableOriginKind::TypeParameterDefinition(
   29,557 ( 0.00%)                          param.name,
   29,557 ( 0.00%)                          Some(param.def_id),
        .                      ),
        .                      span,
        .                  },
        .              );
        .
   29,557 ( 0.00%)              self.tcx.mk_ty_var(ty_var_id).into()
        .          }
        .          GenericParamDefKind::Const { .. } => {
        .              let origin = ConstVariableOrigin {
        .                  kind: ConstVariableOriginKind::ConstParameterDefinition(
        .                      param.name,
        .                      param.def_id,
        .                  ),
        .                  span,
        .              };
        .              let const_var_id =
   34,870 ( 0.00%)                  self.inner.borrow_mut().const_unification_table().new_key(ConstVarValue {
        .                      origin,
        .                      val: ConstVariableValue::Unknown { universe: self.universe() },
        .                  });
    3,170 ( 0.00%)              self.tcx.mk_const_var(const_var_id, self.tcx.type_of(param.def_id)).into()
        .          }
        .      }
   25,360 ( 0.00%)  }
        .
        .  /// Given a set of generics defined on a type or impl, returns a substitution mapping each
        .  /// type/region parameter to a fresh inference variable.
  155,022 ( 0.00%)  pub fn fresh_substs_for_item(&self, span: Span, def_id: DefId) -> SubstsRef<'tcx> {
  687,224 ( 0.00%)      InternalSubsts::for_item(self.tcx, def_id, |param, _| self.var_for_def(span, param))
  103,348 ( 0.00%)  }
        .
        .  /// Returns `true` if errors have been reported since this infcx was
        .  /// created. This is sometimes used as a heuristic to skip
        .  /// reporting errors that often occur as a result of earlier
        .  /// errors, but where it's hard to be 100% sure (e.g., unresolved
        .  /// inference variables, regionck errors).
    4,948 ( 0.00%)  pub fn is_tainted_by_errors(&self) -> bool {
        .      debug!(
        .          "is_tainted_by_errors(err_count={}, err_count_on_creation={}, \
        .          tainted_by_errors_flag={})",
        .          self.tcx.sess.err_count(),
        .          self.err_count_on_creation,
        .          self.tainted_by_errors_flag.get()
        .      );
        .
   85,118 ( 0.00%)      if self.tcx.sess.err_count() > self.err_count_on_creation {
        .          return true; // errors reported since this infcx was made
        .      }
        .      self.tainted_by_errors_flag.get()
    7,422 ( 0.00%)  }
        .
        .  /// Set the "tainted by errors" flag to true. We call this when we
        .  /// observe an error from a prior pass.
        .  pub fn set_tainted_by_errors(&self) {
        .      debug!("set_tainted_by_errors()");
        .      self.tainted_by_errors_flag.set(true)
        .  }
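`var_for_def` and `fresh_substs_for_item` above implement a simple rule: for each generic parameter of an item, mint a fresh inference variable of the matching kind (region, type, or const). The sketch below shows that three-way dispatch with toy types; `ParamKind`, `GenericArg`, and `Counters` are invented names, not rustc's:

    #[derive(Debug, PartialEq)]
    enum ParamKind {
        Lifetime,
        Type,
        Const,
    }

    #[derive(Debug, PartialEq)]
    enum GenericArg {
        Region(u32),
        Ty(u32),
        Const(u32),
    }

    struct Counters {
        regions: u32,
        tys: u32,
        consts: u32,
    }

    impl Counters {
        /// Like `var_for_def`: one fresh variable per parameter, of matching kind.
        fn var_for_def(&mut self, kind: &ParamKind) -> GenericArg {
            match kind {
                ParamKind::Lifetime => { let v = self.regions; self.regions += 1; GenericArg::Region(v) }
                ParamKind::Type => { let v = self.tys; self.tys += 1; GenericArg::Ty(v) }
                ParamKind::Const => { let v = self.consts; self.consts += 1; GenericArg::Const(v) }
            }
        }

        /// Like `fresh_substs_for_item`: map every parameter of the item.
        fn fresh_substs(&mut self, params: &[ParamKind]) -> Vec<GenericArg> {
            params.iter().map(|p| self.var_for_def(p)).collect()
        }
    }

    fn main() {
        let mut c = Counters { regions: 0, tys: 0, consts: 0 };
        // e.g. an item `fn f<'a, T, const N: usize>()`
        let substs = c.fresh_substs(&[ParamKind::Lifetime, ParamKind::Type, ParamKind::Const]);
        assert_eq!(substs, vec![GenericArg::Region(0), GenericArg::Ty(0), GenericArg::Const(0)]);
    }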
        .
        .  /// Process the region constraints and return any errors that
        .  /// result. After this, no more unification operations should be
        .  /// done -- or the compiler will panic -- but it is legal to use
        .  /// `resolve_vars_if_possible` as well as `fully_resolve`.
   79,100 ( 0.00%)  pub fn resolve_regions(
        .      &self,
        .      region_context: DefId,
        .      outlives_env: &OutlivesEnvironment<'tcx>,
        .      mode: RegionckMode,
        .  ) -> Vec<RegionResolutionError<'tcx>> {
  214,700 ( 0.00%)      let (var_infos, data) = {
        .          let mut inner = self.inner.borrow_mut();
        .          let inner = &mut *inner;
   11,300 ( 0.00%)          assert!(
   33,900 ( 0.00%)              self.is_tainted_by_errors() || inner.region_obligations.is_empty(),
        .              "region_obligations not empty: {:#?}",
        .              inner.region_obligations
        .          );
        .          inner
        .              .region_constraint_storage
        .              .take()
        .              .expect("regions already resolved")
        .              .with_log(&mut inner.undo_log)
        .              .into_infos_and_data()
   11,300 ( 0.00%)      };
        .
        .      let region_rels =
   11,300 ( 0.00%)          &RegionRelations::new(self.tcx, region_context, outlives_env.free_region_map());
        .
  101,700 ( 0.00%)      let (lexical_region_resolutions, errors) =
  271,200 ( 0.00%)          lexical_region_resolve::resolve(region_rels, var_infos, data, mode);
        .
   45,200 ( 0.00%)      let old_value = self.lexical_region_resolutions.replace(Some(lexical_region_resolutions));
   11,300 ( 0.00%)      assert!(old_value.is_none());
        .
        .      errors
  101,700 ( 0.00%)  }
        .
        .  /// Process the region constraints and report any errors that
        .  /// result. After this, no more unification operations should be
        .  /// done -- or the compiler will panic -- but it is legal to use
        .  /// `resolve_vars_if_possible` as well as `fully_resolve`.
  113,000 ( 0.00%)  pub fn resolve_regions_and_report_errors(
        .      &self,
        .      region_context: DefId,
        .      outlives_env: &OutlivesEnvironment<'tcx>,
        .      mode: RegionckMode,
        .  ) {
   22,600 ( 0.00%)      let errors = self.resolve_regions(region_context, outlives_env, mode);
        .
   33,900 ( 0.00%)      if !self.is_tainted_by_errors() {
        .          // As a heuristic, just skip reporting region errors
        .          // altogether if other errors have been reported while
        .          // this infcx was in use. This is totally hokey but
        .          // otherwise we have a hard time separating legit region
        .          // errors from silly ones.
   22,600 ( 0.00%)          self.report_region_errors(&errors);
        .      }
   56,500 ( 0.00%)  }
        .
        .  /// Obtains (and clears) the current set of region
        .  /// constraints. The inference context is still usable: further
        .  /// unifications will simply add new constraints.
        .  ///
        .  /// This method is not meant to be used with normal lexical region
        .  /// resolution. Rather, it is used in the NLL mode as a kind of
        .  /// interim hack: basically we run normal type-check and generate
-- line 1307 ----------------------------------------
-- line 1319 ----------------------------------------
        .  }
        .
        .  /// Gives temporary access to the region constraint data.
        .  pub fn with_region_constraints<R>(
        .      &self,
        .      op: impl FnOnce(&RegionConstraintData<'tcx>) -> R,
        .  ) -> R {
        .      let mut inner = self.inner.borrow_mut();
   12,000 ( 0.00%)      op(inner.unwrap_region_constraints().data())
        .  }
        .
        .  pub fn region_var_origin(&self, vid: ty::RegionVid) -> RegionVariableOrigin {
        .      let mut inner = self.inner.borrow_mut();
        .      let inner = &mut *inner;
        .      inner
        .          .region_constraint_storage
        .          .as_mut()
-- line 1335 ----------------------------------------
-- line 1338 ----------------------------------------
        .          .var_origin(vid)
        .  }
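Both `resolve_regions` above and `take_region_var_origins` below `take()` the region constraint storage out of the context, which is exactly why resolving twice panics with "regions already resolved". The take-once discipline is easy to model with an `Option`; the sketch below uses invented names (`RegionCtx`, `RegionStorage`) and a fake "resolver" that just reports every constraint:

    struct RegionStorage {
        constraints: Vec<(u32, u32)>, // toy "region A outlives region B" pairs
    }

    struct RegionCtx {
        region_storage: Option<RegionStorage>,
    }

    impl RegionCtx {
        /// Like `resolve_regions`: consume the accumulated constraints exactly once.
        fn resolve_regions(&mut self) -> Vec<String> {
            let storage = self
                .region_storage
                .take()
                .expect("regions already resolved"); // a second call panics, as above
            // A real resolver would compute least-choice regions here; this toy
            // just reports every constraint so there is something to return.
            storage
                .constraints
                .iter()
                .map(|(a, b)| format!("'{}: '{} unsatisfied", a, b))
                .collect()
        }
    }

    fn main() {
        let mut cx = RegionCtx { region_storage: Some(RegionStorage { constraints: vec![(0, 1)] }) };
        let errors = cx.resolve_regions();
        assert_eq!(errors.len(), 1);
        // cx.resolve_regions(); // would panic: "regions already resolved"
    }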
        .
        .  /// Takes ownership of the list of variable regions. This implies
        .  /// that all the region constraints have already been taken, and
        .  /// hence that `resolve_regions_and_report_errors` can never be
        .  /// called. This is used only during NLL processing to "hand off" ownership
        .  /// of the set of region variables into the NLL region context.
    6,745 ( 0.00%)  pub fn take_region_var_origins(&self) -> VarInfos {
        .      let mut inner = self.inner.borrow_mut();
   25,631 ( 0.00%)      let (var_infos, data) = inner
        .          .region_constraint_storage
        .          .take()
        .          .expect("regions already resolved")
        .          .with_log(&mut inner.undo_log)
    1,349 ( 0.00%)          .into_infos_and_data();
    1,349 ( 0.00%)      assert!(data.is_empty());
        .      var_infos
   10,792 ( 0.00%)  }
        .
        .  pub fn ty_to_string(&self, t: Ty<'tcx>) -> String {
        .      self.resolve_vars_if_possible(t).to_string()
        .  }
        .
        .  /// If `TyVar(vid)` resolves to a type, return that type. Else, return the
        .  /// universe index of `TyVar(vid)`.
   15,288 ( 0.00%)  pub fn probe_ty_var(&self, vid: TyVid) -> Result<Ty<'tcx>, ty::UniverseIndex> {
        .      use self::type_variable::TypeVariableValue;
        .
   45,864 ( 0.00%)      match self.inner.borrow_mut().type_variables().probe(vid) {
        .          TypeVariableValue::Known { value } => Ok(value),
        .          TypeVariableValue::Unknown { universe } => Err(universe),
        .      }
   53,508 ( 0.00%)  }
        .
        .  /// Resolve any type variables found in `value` -- but only one
        .  /// level. So, if the variable `?X` is bound to some type
        .  /// `Foo<?Y>`, then this would return `Foo<?Y>` (but `?Y` may
        .  /// itself be bound to a type).
        .  ///
        .  /// Useful when you only need to inspect the outermost level of
        .  /// the type and don't care about nested types (or perhaps you
        .  /// will be resolving them as well, e.g. in a loop).
        .  pub fn shallow_resolve<T>(&self, value: T) -> T
        .  where
        .      T: TypeFoldable<'tcx>,
        .  {
  422,592 ( 0.00%)      value.fold_with(&mut ShallowResolver { infcx: self })
        .  }
        .
   29,382 ( 0.00%)  pub fn root_var(&self, var: ty::TyVid) -> ty::TyVid {
        .      self.inner.borrow_mut().type_variables().root_var(var)
   44,073 ( 0.00%)  }
        .
        .  /// Where possible, replaces type/const variables in
        .  /// `value` with their final value. Note that region variables
        .  /// are unaffected. If a type/const variable has not been unified, it
        .  /// is left as is. This is an idempotent operation that does
        .  /// not affect inference state in any way and so you can do it
        .  /// at will.
  136,780 ( 0.00%)  pub fn resolve_vars_if_possible<T>(&self, value: T) -> T
        .  where
        .      T: TypeFoldable<'tcx>,
        .  {
  440,723 ( 0.00%)      if !value.needs_infer() {
  316,545 ( 0.00%)          return value; // Avoid duplicated subst-folding.
        .      }
  484,915 ( 0.00%)      let mut r = resolve::OpportunisticVarResolver::new(self);
  549,198 ( 0.00%)      value.fold_with(&mut r)
  125,118 ( 0.00%)  }
        .
        .  /// Returns the first unresolved variable contained in `T`. In the
        .  /// process of visiting `T`, this will resolve (where possible)
        .  /// type variables in `T`, but it never constructs the final,
        .  /// resolved type, so it's more efficient than
        .  /// `resolve_vars_if_possible()`.
        .  pub fn unresolved_type_vars<T>(&self, value: &T) -> Option<(Ty<'tcx>, Option<Span>)>
        .  where
-- line 1415 ----------------------------------------
-- line 1490 ----------------------------------------
        .      expected: &'tcx ty::Const<'tcx>,
        .      actual: &'tcx ty::Const<'tcx>,
        .      err: TypeError<'tcx>,
        .  ) -> DiagnosticBuilder<'tcx> {
        .      let trace = TypeTrace::consts(cause, true, expected, actual);
        .      self.report_and_explain_type_error(trace, &err)
        .  }
        .
   49,572 ( 0.00%)  pub fn replace_bound_vars_with_fresh_vars<T>(
        .      &self,
        .      span: Span,
        .      lbrct: LateBoundRegionConversionTime,
        .      value: ty::Binder<'tcx, T>,
        .  ) -> (T, BTreeMap<ty::BoundRegion, ty::Region<'tcx>>)
        .  where
        .      T: TypeFoldable<'tcx>,
        .  {
        .      let fld_r =
  107,133 ( 0.00%)          |br: ty::BoundRegion| self.next_region_var(LateBoundRegion(span, br.kind, lbrct));
        .      let fld_t = |_| {
        .          self.next_ty_var(TypeVariableOrigin {
        .              kind: TypeVariableOriginKind::MiscVariable,
        .              span,
        .          })
        .      };
        .      let fld_c = |_, ty| {
        .          self.next_const_var(
        .              ty,
        .              ConstVariableOrigin { kind: ConstVariableOriginKind::MiscVariable, span },
        .          )
        .      };
  195,519 ( 0.00%)      self.tcx.replace_bound_vars(value, fld_r, fld_t, fld_c)
   33,048 ( 0.00%)  }
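The difference between `shallow_resolve` and `resolve_vars_if_possible` above is one level of lookup versus a full fold, with the `needs_infer` check as a fast path to skip the fold entirely. A toy version with a plain binding map standing in for rustc's unification tables (all names invented for this sketch):

    use std::collections::HashMap;

    #[derive(Clone, Debug, PartialEq)]
    enum Ty {
        Int,
        Var(u32),
        Vec(Box<Ty>), // one type constructor is enough to show nesting
    }

    struct Bindings {
        known: HashMap<u32, Ty>,
    }

    impl Bindings {
        /// One level only: `?X -> Vec<?Y>` resolves to `Vec<?Y>`, leaving `?Y` alone.
        fn shallow_resolve(&self, ty: &Ty) -> Ty {
            match ty {
                Ty::Var(v) => self.known.get(v).cloned().unwrap_or_else(|| ty.clone()),
                _ => ty.clone(),
            }
        }

        /// Full fold: resolve through constructors, leaving unbound vars as-is.
        fn resolve_if_possible(&self, ty: &Ty) -> Ty {
            match self.shallow_resolve(ty) {
                Ty::Vec(inner) => Ty::Vec(Box::new(self.resolve_if_possible(&inner))),
                other => other,
            }
        }
    }

    fn main() {
        let mut b = Bindings { known: HashMap::new() };
        b.known.insert(0, Ty::Vec(Box::new(Ty::Var(1)))); // ?0 := Vec<?1>
        b.known.insert(1, Ty::Int);                       // ?1 := Int
        assert_eq!(b.shallow_resolve(&Ty::Var(0)), Ty::Vec(Box::new(Ty::Var(1))));
        assert_eq!(b.resolve_if_possible(&Ty::Var(0)), Ty::Vec(Box::new(Ty::Int)));
    }

The `needs_infer` early return matters in this profile: most values passed in are already inference-free, so the fold (and its allocations) is skipped for them.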
        .
        .  /// See the [`region_constraints::RegionConstraintCollector::verify_generic_bound`] method.
      888 ( 0.00%)  pub fn verify_generic_bound(
        .      &self,
        .      origin: SubregionOrigin<'tcx>,
        .      kind: GenericKind<'tcx>,
        .      a: ty::Region<'tcx>,
        .      bound: VerifyBound<'tcx>,
        .  ) {
        .      debug!("verify_generic_bound({:?}, {:?} <: {:?})", kind, a, bound);
        .
      333 ( 0.00%)      self.inner
        .          .borrow_mut()
        .          .unwrap_region_constraints()
    1,887 ( 0.00%)          .verify_generic_bound(origin, kind, a, bound);
      777 ( 0.00%)  }
        .
        .  /// Obtains the latest type of the given closure; this may be a
        .  /// closure in the current function, in which case its
        .  /// `ClosureKind` may not yet be known.
    3,970 ( 0.00%)  pub fn closure_kind(&self, closure_substs: SubstsRef<'tcx>) -> Option<ty::ClosureKind> {
    7,940 ( 0.00%)      let closure_kind_ty = closure_substs.as_closure().kind_ty();
        .      let closure_kind_ty = self.shallow_resolve(closure_kind_ty);
    5,955 ( 0.00%)      closure_kind_ty.to_opt_closure_kind()
        .  }
        .
        .  /// Clears the selection, evaluation, and projection caches. This is useful when
        .  /// repeatedly attempting to select an `Obligation` while changing only
        .  /// its `ParamEnv`, since `FulfillmentContext` doesn't use probing.
        .  pub fn clear_caches(&self) {
        .      self.selection_cache.clear();
        .      self.evaluation_cache.clear();
        .      self.inner.borrow_mut().projection_cache().clear();
        .  }
        .
        .  pub fn universe(&self) -> ty::UniverseIndex {
  782,384 ( 0.00%)      self.universe.get()
  261,294 ( 0.00%)  }
        .
        .  /// Creates and returns a fresh universe that extends all previous
        .  /// universes. Updates `self.universe` to that new universe.
      278 ( 0.00%)  pub fn create_next_universe(&self) -> ty::UniverseIndex {
      398 ( 0.00%)      let u = self.universe.get().next_universe();
        .      self.universe.set(u);
        .      u
      278 ( 0.00%)  }
        .
        .  /// Resolves and evaluates a constant.
        .  ///
        .  /// The constant can be located on a trait like `<A as B>::C`, in which case the given
        .  /// substitutions and environment are used to resolve the constant. Alternatively if the
        .  /// constant has generic parameters in scope the substitutions are used to evaluate the value of
        .  /// the constant. For example in `fn foo<T>() { let _ = [0; bar::<T>()]; }` the repeat count
        .  /// constant `bar::<T>()` requires a substitution for `T`. If the substitution for `T` is still
        .  /// too generic for the constant to be evaluated then `Err(ErrorHandled::TooGeneric)` is
        .  /// returned.
        .  ///
        .  /// This handles inference variables within both `param_env` and `substs` by
        .  /// performing the operation on their respective canonical forms.
      507 ( 0.00%)  pub fn const_eval_resolve(
        .      &self,
        .      param_env: ty::ParamEnv<'tcx>,
        .      unevaluated: ty::Unevaluated<'tcx>,
        .      span: Option<Span>,
        .  ) -> EvalToConstValueResult<'tcx> {
       39 ( 0.00%)      let substs = self.resolve_vars_if_possible(unevaluated.substs);
        .
        .      // Postpone the evaluation of constants whose substs depend on inference
        .      // variables
        .      if substs.has_infer_types_or_consts() {
        .          return Err(ErrorHandled::TooGeneric);
        .      }
        .
       39 ( 0.00%)      let param_env_erased = self.tcx.erase_regions(param_env);
        9 ( 0.00%)      let substs_erased = self.tcx.erase_regions(substs);
        .
        .      let unevaluated = ty::Unevaluated {
        .          def: unevaluated.def,
        .          substs: substs_erased,
       78 ( 0.00%)          promoted: unevaluated.promoted,
        .      };
        .
        .      // The return value is the evaluated value which doesn't contain any reference to inference
        .      // variables, thus we don't need to substitute back the original values.
      468 ( 0.00%)      self.tcx.const_eval_resolve(param_env_erased, unevaluated, span)
      351 ( 0.00%)  }
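`const_eval_resolve` above only evaluates once the substitutions are inference-free, returning `TooGeneric` otherwise, and erases regions first because they cannot affect the value. A schematic of that gating with toy types (nothing here is the rustc evaluator; the "constant" is just a sum):

    #[derive(Debug, PartialEq)]
    enum EvalError {
        TooGeneric,
    }

    #[derive(Clone, Copy)]
    enum Subst {
        Concrete(i64),
        InferVar, // a still-unresolved inference variable
    }

    /// Evaluate a toy "constant" (the sum of its substitutions), but refuse
    /// while any substitution is still an unresolved inference variable.
    fn const_eval_resolve(substs: &[Subst]) -> Result<i64, EvalError> {
        // Postpone evaluation if anything is still too generic, as above.
        if substs.iter().any(|s| matches!(s, Subst::InferVar)) {
            return Err(EvalError::TooGeneric);
        }
        Ok(substs
            .iter()
            .map(|s| match s {
                Subst::Concrete(v) => *v,
                Subst::InferVar => unreachable!(),
            })
            .sum())
    }

    fn main() {
        assert_eq!(
            const_eval_resolve(&[Subst::Concrete(2), Subst::InferVar]),
            Err(EvalError::TooGeneric)
        );
        assert_eq!(const_eval_resolve(&[Subst::Concrete(2), Subst::Concrete(3)]), Ok(5));
    }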
        .
        .  /// If `typ` is a type variable of some kind, resolve it one level
        .  /// (but do not resolve types found in the result). If `typ` is
        .  /// not a type variable, just return it unmodified.
        .  // FIXME(eddyb) inline into `ShallowResolver::visit_ty`.
5,077,592 ( 0.02%)  fn shallow_resolve_ty(&self, typ: Ty<'tcx>) -> Ty<'tcx> {
3,006,051 ( 0.01%)      match *typ.kind() {
        .          ty::Infer(ty::TyVar(v)) => {
        .              // Not entirely obvious: if `typ` is a type variable,
        .              // it can be resolved to an int/float variable, which
        .              // can then be recursively resolved, hence the
        .              // recursion. Note though that we prevent type
        .              // variables from unifying to other type variables
        .              // directly (though they may be embedded
        .              // structurally), and we prevent cycles in any case,
        .              // so this recursion should always be of very limited
        .              // depth.
        .              //
        .              // Note: if these two lines are combined into one we get
        .              // dynamic borrow errors on `self.inner`.
1,558,476 ( 0.01%)              let known = self.inner.borrow_mut().type_variables().probe(v).known();
        .              known.map_or(typ, |t| self.shallow_resolve_ty(t))
        .          }
        .
  164,852 ( 0.00%)          ty::Infer(ty::IntVar(v)) => self
        .              .inner
        .              .borrow_mut()
        .              .int_unification_table()
        .              .probe_value(v)
   40,176 ( 0.00%)              .map(|v| v.to_type(self.tcx))
        .              .unwrap_or(typ),
        .
        .          ty::Infer(ty::FloatVar(v)) => self
        .              .inner
        .              .borrow_mut()
        .              .float_unification_table()
        .              .probe_value(v)
        .              .map(|v| v.to_type(self.tcx))
        .              .unwrap_or(typ),
        .
        .          _ => typ,
        .      }
5,712,291 ( 0.02%)  }
        .
        .  /// `ty_or_const_infer_var_changed` is equivalent to one of these two:
        .  /// * `shallow_resolve(ty) != ty` (where `ty.kind = ty::Infer(_)`)
        .  /// * `shallow_resolve(ct) != ct` (where `ct.kind = ty::ConstKind::Infer(_)`)
        .  ///
        .  /// However, `ty_or_const_infer_var_changed` is more efficient. It's always
        .  /// inlined, despite being large, because it has only two call sites that
        .  /// are extremely hot (both in `traits::fulfill`'s checking of `stalled_on`
-- line 1659 ----------------------------------------
-- line 1662 ----------------------------------------
        .  #[inline(always)]
        .  pub fn ty_or_const_infer_var_changed(&self, infer_var: TyOrConstInferVar<'tcx>) -> bool {
        .      match infer_var {
        .          TyOrConstInferVar::Ty(v) => {
        .              use self::type_variable::TypeVariableValue;
        .
        .              // If `inlined_probe` returns a `Known` value, it never equals
        .              // `ty::Infer(ty::TyVar(v))`.
15,725,791 ( 0.07%)              match self.inner.borrow_mut().type_variables().inlined_probe(v) {
        .                  TypeVariableValue::Unknown { .. } => false,
        .                  TypeVariableValue::Known { .. } => true,
        .              }
        .          }
        .
        .          TyOrConstInferVar::TyInt(v) => {
        .              // If `inlined_probe_value` returns a value it's always a
        .              // `ty::Int(_)` or `ty::UInt(_)`, which never matches a
        .              // `ty::Infer(_)`.
   32,930 ( 0.00%)              self.inner.borrow_mut().int_unification_table().inlined_probe_value(v).is_some()
        .          }
        .
        .          TyOrConstInferVar::TyFloat(v) => {
        .              // If `probe_value` returns a value it's always a
        .              // `ty::Float(_)`, which never matches a `ty::Infer(_)`.
        .              //
        .              // Not `inlined_probe_value(v)` because this call site is colder.
        .              self.inner.borrow_mut().float_unification_table().probe_value(v).is_some()
-- line 1688 ----------------------------------------
-- line 1716 ----------------------------------------
        .      /// Equivalent to `ty::ConstKind::Infer(ty::InferConst::Var(_))`.
        .      Const(ConstVid<'tcx>),
        .  }
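`ty_or_const_infer_var_changed` exists so the fulfillment loop can cheaply ask "did any variable this obligation is stalled on get resolved?" without rebuilding types; the 15.7M Ir line above is that probe. The stalled-obligation recheck it serves looks roughly like the sketch below (toy names throughout, not rustc's fulfillment machinery):

    use std::collections::HashMap;

    #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
    struct InferVar(u32);

    struct Table {
        known: HashMap<InferVar, i64>, // resolved variables and their values
    }

    impl Table {
        /// Like `ty_or_const_infer_var_changed`: true iff the variable now has a value.
        fn infer_var_changed(&self, v: InferVar) -> bool {
            self.known.contains_key(&v)
        }
    }

    struct Obligation {
        stalled_on: Vec<InferVar>,
    }

    /// Keep an obligation stalled only while *none* of its variables changed.
    fn still_stalled(table: &Table, ob: &Obligation) -> bool {
        !ob.stalled_on.iter().any(|&v| table.infer_var_changed(v))
    }

    fn main() {
        let mut table = Table { known: HashMap::new() };
        let ob = Obligation { stalled_on: vec![InferVar(0), InferVar(1)] };
        assert!(still_stalled(&table, &ob)); // nothing resolved yet
        table.known.insert(InferVar(1), 7);  // ?1 got resolved
        assert!(!still_stalled(&table, &ob)); // wake the obligation up
    }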
        .
        .  impl<'tcx> TyOrConstInferVar<'tcx> {
        .      /// Tries to extract an inference variable from a type or a constant, returns `None`
        .      /// for types other than `ty::Infer(_)` (or `InferTy::Fresh*`) and
        .      /// for constants other than `ty::ConstKind::Infer(_)` (or `InferConst::Fresh`).
   28,252 ( 0.00%)      pub fn maybe_from_generic_arg(arg: GenericArg<'tcx>) -> Option<Self> {
        .          match arg.unpack() {
        .              GenericArgKind::Type(ty) => Self::maybe_from_ty(ty),
        .              GenericArgKind::Const(ct) => Self::maybe_from_const(ct),
        .              GenericArgKind::Lifetime(_) => None,
        .          }
   28,252 ( 0.00%)      }
        .
        .      /// Tries to extract an inference variable from a type, returns `None`
        .      /// for types other than `ty::Infer(_)` (or `InferTy::Fresh*`).
    1,220 ( 0.00%)      pub fn maybe_from_ty(ty: Ty<'tcx>) -> Option<Self> {
  260,217 ( 0.00%)          match *ty.kind() {
   54,548 ( 0.00%)              ty::Infer(ty::TyVar(v)) => Some(TyOrConstInferVar::Ty(v)),
    4,142 ( 0.00%)              ty::Infer(ty::IntVar(v)) => Some(TyOrConstInferVar::TyInt(v)),
        .              ty::Infer(ty::FloatVar(v)) => Some(TyOrConstInferVar::TyFloat(v)),
        .              _ => None,
        .          }
    1,220 ( 0.00%)      }
        .
        .      /// Tries to extract an inference variable from a constant, returns `None`
        .      /// for constants other than `ty::ConstKind::Infer(_)` (or `InferConst::Fresh`).
        .      pub fn maybe_from_const(ct: &'tcx ty::Const<'tcx>) -> Option<Self> {
        .          match ct.val {
        .              ty::ConstKind::Infer(InferConst::Var(v)) => Some(TyOrConstInferVar::Const(v)),
        .              _ => None,
        .          }
-- line 1749 ----------------------------------------
-- line 1755 ----------------------------------------
        .  }
        .
        .  impl<'a, 'tcx> TypeFolder<'tcx> for ShallowResolver<'a, 'tcx> {
        .      fn tcx<'b>(&'b self) -> TyCtxt<'tcx> {
        .          self.infcx.tcx
        .      }
        .
        .      fn fold_ty(&mut self, ty: Ty<'tcx>) -> Ty<'tcx> {
  920,270 ( 0.00%)          self.infcx.shallow_resolve_ty(ty)
        .      }
        .
   26,536 ( 0.00%)      fn fold_const(&mut self, ct: &'tcx ty::Const<'tcx>) -> &'tcx ty::Const<'tcx> {
   21,760 ( 0.00%)          if let ty::Const { val: ty::ConstKind::Infer(InferConst::Var(vid)), .. } = ct {
   21,230 ( 0.00%)              self.infcx
        .                  .inner
        .                  .borrow_mut()
        .                  .const_unification_table()
   12,738 ( 0.00%)                  .probe_value(*vid)
        .                  .val
        .                  .known()
        .                  .unwrap_or(ct)
        .          } else {
        .              ct
        .          }
   33,170 ( 0.00%)      }
        .  }
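`maybe_from_generic_arg` above is a kind-dispatch: types and consts can contribute an inference variable to stall on, lifetimes never do. In toy form (invented `InferVar`/`GenericArg` types; the real ones carry interned rustc data):

    #[derive(Debug, PartialEq)]
    enum InferVar {
        Ty(u32),
        Const(u32),
    }

    enum GenericArg {
        Lifetime,
        Ty { infer: Option<u32> },    // Some(v) models `ty::Infer(ty::TyVar(v))`
        Const { infer: Option<u32> }, // Some(v) models `ConstKind::Infer(Var(v))`
    }

    /// Like `maybe_from_generic_arg`: only types and consts can yield a variable.
    fn maybe_from_generic_arg(arg: GenericArg) -> Option<InferVar> {
        match arg {
            GenericArg::Lifetime => None,
            GenericArg::Ty { infer } => infer.map(InferVar::Ty),
            GenericArg::Const { infer } => infer.map(InferVar::Const),
        }
    }

    fn main() {
        assert_eq!(maybe_from_generic_arg(GenericArg::Lifetime), None);
        assert_eq!(
            maybe_from_generic_arg(GenericArg::Ty { infer: Some(3) }),
            Some(InferVar::Ty(3))
        );
        assert_eq!(maybe_from_generic_arg(GenericArg::Const { infer: None }), None);
    }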
        .
        .  impl<'tcx> TypeTrace<'tcx> {
        .      pub fn span(&self) -> Span {
        .          self.cause.span
        .      }
        .
        .      pub fn types(
-- line 1787 ----------------------------------------
-- line 1818 ----------------------------------------
        .              CompareImplTypeObligation { span, .. } => span,
        .          }
        .      }
        .
        .      pub fn from_obligation_cause<F>(cause: &traits::ObligationCause<'tcx>, default: F) -> Self
        .      where
        .          F: FnOnce() -> Self,
        .      {
   33,596 ( 0.00%)          match *cause.code() {
    7,512 ( 0.00%)              traits::ObligationCauseCode::ReferenceOutlivesReferent(ref_type) => {
   37,560 ( 0.00%)                  SubregionOrigin::ReferenceOutlivesReferent(ref_type, cause.span)
        .              }
        .
        .              traits::ObligationCauseCode::CompareImplMethodObligation {
        .                  impl_item_def_id,
        .                  trait_item_def_id,
        .              } => SubregionOrigin::CompareImplMethodObligation {
        .                  span: cause.span,
        .                  impl_item_def_id,
-- line 1836 ----------------------------------------

13,993,105 ( 0.06%)

--------------------------------------------------------------------------------
The following files chosen for auto-annotation could not be found:
--------------------------------------------------------------------------------
./elf/dl-lookup.c
./malloc/malloc.c
./string/../sysdeps/x86_64/multiarch/memcmp-avx2-movbe.S
./string/../sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S
./string/../sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S
/tmp/gcc-build/x86_64-unknown-linux-gnu/libstdc++-v3/libsupc++/../../../../gcc-5.5.0/libstdc++-v3/libsupc++/new_op.cc
/tmp/gcc-build/x86_64-unknown-linux-gnu/libstdc++-v3/libsupc++/../../../../gcc-5.5.0/libstdc++-v3/libsupc++/new_opv.cc

--------------------------------------------------------------------------------
Ir
--------------------------------------------------------------------------------
475,861,562 ( 1.98%)  events annotated