--------------------------------------------------------------------------------
I1 cache:         65536 B, 64 B, 4-way associative
D1 cache:         32768 B, 64 B, 8-way associative
LL cache:         67108864 B, 64 B, 64-way associative
Command:          /usr/home/liquid/.rustup/toolchains/w-profiling/bin/rustc --crate-name slog_term --edition=2018 src/lib.rs --error-format=json --json=diagnostic-rendered-ansi,artifacts,future-incompat --crate-type lib --emit=dep-info,metadata,link -C opt-level=3 -C embed-bitcode=no -C metadata=5a921ca04f3a167f -C extra-filename=-5a921ca04f3a167f --out-dir /usr/home/liquid/tmp/.tmpLdCpGe/target/release/deps -L dependency=/usr/home/liquid/tmp/.tmpLdCpGe/target/release/deps --extern atty=/usr/home/liquid/tmp/.tmpLdCpGe/target/release/deps/libatty-93194675dc24e778.rmeta --extern chrono=/usr/home/liquid/tmp/.tmpLdCpGe/target/release/deps/libchrono-fc7a1e73f8eb2fa1.rmeta --extern slog=/usr/home/liquid/tmp/.tmpLdCpGe/target/release/deps/libslog-0e00d51925b9a38f.rmeta --extern term=/usr/home/liquid/tmp/.tmpLdCpGe/target/release/deps/libterm-e20aa5908d2f8064.rmeta --extern thread_local=/usr/home/liquid/tmp/.tmpLdCpGe/target/release/deps/libthread_local-3272d6b27af52041.rmeta -Adeprecated -Aunknown-lints -Zincremental-verify-ich
Data file:        results/cgout-w-profiling-slog-term-2.8.0-Opt-Full
Events recorded:  Ir
Events shown:     Ir
Event sort order: Ir
Thresholds:       0.1
Include dirs:
User annotated:
Auto-annotation:  on
--------------------------------------------------------------------------------
Ir
--------------------------------------------------------------------------------
7,690,404,009 (100.0%)  PROGRAM TOTALS

--------------------------------------------------------------------------------
Ir                      file:function
--------------------------------------------------------------------------------
293,646,397 ( 3.82%)  ./malloc/malloc.c:_int_free
213,539,145 ( 2.78%)  ./malloc/malloc.c:_int_malloc
180,401,025 ( 2.35%)  ./malloc/malloc.c:malloc
168,119,279 ( 2.19%)  ???:llvm::InstCombinerImpl::run()
143,149,671 ( 1.86%)  ???:llvm::SelectionDAG::Combine(llvm::CombineLevel, llvm::AAResults*, llvm::CodeGenOpt::Level)
113,206,565 ( 1.47%)  ???:combineInstructionsOverFunction(llvm::Function&, llvm::InstCombineWorklist&, llvm::AAResults*, llvm::AssumptionCache&, llvm::TargetLibraryInfo&, llvm::TargetTransformInfo&, llvm::DominatorTree&, llvm::OptimizationRemarkEmitter&, llvm::BlockFrequencyInfo*, llvm::ProfileSummaryInfo*, unsigned int, llvm::LoopInfo*)
112,360,657 ( 1.46%)  ???:llvm::FPPassManager::runOnFunction(llvm::Function&)
 93,911,879 ( 1.22%)  ./malloc/malloc.c:free
 81,178,635 ( 1.06%)  ???:llvm::BasicAAResult::alias(llvm::MemoryLocation const&, llvm::MemoryLocation const&, llvm::AAQueryInfo&)
 74,161,899 ( 0.96%)  ???:llvm::isNonEscapingLocalObject(llvm::Value const*, llvm::SmallDenseMap, llvm::detail::DenseMapPair >*)
 67,194,835 ( 0.87%)  ???:computeKnownBits(llvm::Value const*, llvm::KnownBits&, unsigned int, (anonymous namespace)::Query const&) [clone .llvm.15619146473165121143]
 65,584,343 ( 0.85%)  ???:runCVP(llvm::Module&) [clone .llvm.11785992503873176614]
 62,847,385 ( 0.82%)  ???:llvm::AnalysisManager::getResultImpl(llvm::AnalysisKey*, llvm::Function&)
 59,765,849 ( 0.78%)  ???:llvm::AnalysisManager::invalidate(llvm::Function&, llvm::PreservedAnalyses const&)
 56,927,215 ( 0.74%)  ???:computeKnownBitsFromOperator(llvm::Operator const*, llvm::APInt const&, llvm::KnownBits&, unsigned int, (anonymous namespace)::Query const&)
 53,229,287 ( 0.69%)  ???:llvm::DataLayout::getAlignment(llvm::Type*, bool) const
 52,061,523 ( 0.68%)  ???:bool llvm::DenseMapBase*, llvm::DenseMapInfo<(anonymous namespace)::SimpleValue>, llvm::detail::DenseMapPair<(anonymous namespace)::SimpleValue, llvm::ScopedHashTableVal<(anonymous namespace)::SimpleValue, llvm::Value*>*> >, (anonymous namespace)::SimpleValue, llvm::ScopedHashTableVal<(anonymous namespace)::SimpleValue, llvm::Value*>*, llvm::DenseMapInfo<(anonymous namespace)::SimpleValue>, llvm::detail::DenseMapPair<(anonymous namespace)::SimpleValue, llvm::ScopedHashTableVal<(anonymous namespace)::SimpleValue, llvm::Value*>*> >::LookupBucketFor<(anonymous namespace)::SimpleValue>((anonymous namespace)::SimpleValue const&, llvm::detail::DenseMapPair<(anonymous namespace)::SimpleValue, llvm::ScopedHashTableVal<(anonymous namespace)::SimpleValue, llvm::Value*>*> const*&) const
 50,131,228 ( 0.65%)  ???:llvm::ValueHandleBase::AddToUseList()
 48,896,279 ( 0.64%)  ???:llvm::LiveVariables::runOnBlock(llvm::MachineBasicBlock*, unsigned int)
 45,975,606 ( 0.60%)  ???:llvm::DomTreeBuilder::SemiNCAInfo >::CalculateFromScratch(llvm::DominatorTreeBase&, llvm::DomTreeBuilder::SemiNCAInfo >::BatchUpdateInfo*)
 45,464,315 ( 0.59%)  ???:llvm::DataLayout::getTypeSizeInBits(llvm::Type*) const
 43,033,613 ( 0.56%)  ???:(anonymous namespace)::LazyValueInfoImpl::solve() [clone .llvm.4316243980339171764]
 41,298,087 ( 0.54%)  ???:llvm::TargetLibraryInfoImpl::getLibFunc(llvm::Function const&, llvm::LibFunc&) const
 40,650,895 ( 0.53%)  ???:llvm::InstCombinerImpl::SimplifyDemandedUseBits(llvm::Value*, llvm::APInt, llvm::KnownBits&, unsigned int, llvm::Instruction*)
 39,914,683 ( 0.52%)  ./string/../sysdeps/x86_64/multiarch/memcmp-avx2-movbe.S:__memcmp_avx2_movbe
 38,631,207 ( 0.50%)  ???:llvm::removeUnreachableBlocks(llvm::Function&, llvm::DomTreeUpdater*, llvm::MemorySSAUpdater*)
 38,102,731 ( 0.50%)  ???:llvm::InstCombinerImpl::visitCallInst(llvm::CallInst&)
 38,089,168 ( 0.50%)  ???:llvm::SimplifyInstruction(llvm::Instruction*, llvm::SimplifyQuery const&, llvm::OptimizationRemarkEmitter*)
 37,377,422 ( 0.49%)  ./string/../sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S:__memcpy_avx_unaligned_erms
 37,224,930 ( 0.48%)  ???:llvm::AttributeList::addAttributes(llvm::LLVMContext&, unsigned int, llvm::AttrBuilder const&) const
 34,213,487 ( 0.44%)  /tmp/gcc-build/x86_64-unknown-linux-gnu/libstdc++-v3/libsupc++/../../../../gcc-5.5.0/libstdc++-v3/libsupc++/new_op.cc:operator new(unsigned long)
 34,002,907 ( 0.44%)  ???:llvm::SelectionDAGISel::SelectCodeCommon(llvm::SDNode*, unsigned char const*, unsigned int)
 33,415,292 ( 0.43%)  ???:llvm::SROA::runOnAlloca(llvm::AllocaInst&)
 33,160,689 ( 0.43%)  ???:(anonymous namespace)::MachineCopyPropagation::runOnMachineFunction(llvm::MachineFunction&)
 31,715,312 ( 0.41%)  ???:llvm::BitstreamCursor::readRecord(unsigned int, llvm::SmallVectorImpl&, llvm::StringRef*)
 31,020,004 ( 0.40%)  ./malloc/malloc.c:malloc_consolidate
 30,865,594 ( 0.40%)  ???:(anonymous namespace)::eliminateDeadStores(llvm::Function&, llvm::AAResults&, llvm::MemorySSA&, llvm::DominatorTree&, llvm::PostDominatorTree&, llvm::TargetLibraryInfo const&, llvm::LoopInfo const&) [clone .llvm.5769264623867638418]
 29,398,111 ( 0.38%)  ???:(anonymous namespace)::DAGCombiner::combine(llvm::SDNode*)
 29,016,920 ( 0.38%)  ./string/../sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S:__memset_avx2_erms
 26,168,836 ( 0.34%)  ???:llvm::DomTreeBuilder::SemiNCAInfo >::runSemiNCA(llvm::DominatorTreeBase&, unsigned int)
 25,906,163 ( 0.34%)  ???:(anonymous namespace)::DeadMachineInstructionElim::eliminateDeadMI(llvm::MachineFunction&)
 25,225,462 ( 0.33%)  ???:computeKnownBitsFromAssume(llvm::Value const*, llvm::KnownBits&, unsigned int, (anonymous namespace)::Query const&)
 24,807,730 ( 0.32%)  ???:llvm::SimplifyGEPInst(llvm::Type*, llvm::ArrayRef, llvm::SimplifyQuery const&)
 24,475,203 ( 0.32%)  ???:llvm::InlineFunction(llvm::CallBase&, llvm::InlineFunctionInfo&, llvm::AAResults*, bool, llvm::Function*)
 24,455,747 ( 0.32%)  ???:(anonymous namespace)::EarlyCSE::run() [clone .llvm.7062997131228810369]
 23,719,621 ( 0.31%)  ???:llvm::GVN::processBlock(llvm::BasicBlock*)
 23,117,700 ( 0.30%)  ???:llvm::AttributeSetNode::get(llvm::LLVMContext&, llvm::AttrBuilder const&)
 22,749,915 ( 0.30%)  ???:llvm::TargetLoweringBase::getTypeConversion(llvm::LLVMContext&, llvm::EVT) const
 22,392,880 ( 0.29%)  ???:llvm::InstCombinerImpl::visitICmpInst(llvm::ICmpInst&)
 22,346,828 ( 0.29%)  ???:llvm::ScheduleDAGSDNodes::BuildSchedUnits()
 22,346,313 ( 0.29%)  ???:(anonymous namespace)::LazyValueInfoImpl::getEdgeValue(llvm::Value*, llvm::BasicBlock*, llvm::BasicBlock*, llvm::Instruction*) [clone .llvm.4316243980339171764]
 21,961,576 ( 0.29%)  ???:llvm::Type::isSizedDerivedType(llvm::SmallPtrSetImpl*) const
 21,936,699 ( 0.29%)  ???:llvm::FindFunctionBackedges(llvm::Function const&, llvm::SmallVectorImpl >&)
 21,850,703 ( 0.28%)  ???:llvm::simplifyCFG(llvm::BasicBlock*, llvm::TargetTransformInfo const&, llvm::DomTreeUpdater*, llvm::SimplifyCFGOptions const&, llvm::ArrayRef)
 21,286,768 ( 0.28%)  ???:llvm::SROA::runImpl(llvm::Function&, llvm::DominatorTree&, llvm::AssumptionCache&)
 21,168,059 ( 0.28%)  ???:(anonymous namespace)::Verifier::visitInstruction(llvm::Instruction&)
 21,085,212 ( 0.27%)  ???:llvm::Intrinsic::getDeclaration(llvm::Module*, unsigned int, llvm::ArrayRef)
 21,063,553 ( 0.27%)  ???:llvm::InstCombinerImpl::visitStoreInst(llvm::StoreInst&)
 20,553,011 ( 0.27%)  ???:llvm::MemoryDependenceResults::getNonLocalPointerDepFromBB(llvm::Instruction*, llvm::PHITransAddr const&, llvm::MemoryLocation const&, bool, llvm::BasicBlock*, llvm::SmallVectorImpl&, llvm::DenseMap, llvm::detail::DenseMapPair >&, bool, bool)
 20,429,216 ( 0.27%)  ./malloc/malloc.c:unlink_chunk.constprop.0
 20,128,490 ( 0.26%)  ???:llvm::BasicAAResult::getModRefInfo(llvm::CallBase const*, llvm::MemoryLocation const&, llvm::AAQueryInfo&)
 19,716,808 ( 0.26%)  ???:llvm::MemorySSA::buildMemorySSA(llvm::BatchAAResults&)
 19,710,365 ( 0.26%)  ???:llvm::PMDataManager::verifyPreservedAnalysis(llvm::Pass*)
 19,308,500 ( 0.25%)  ???:llvm::LivePhysRegs::stepBackward(llvm::MachineInstr const&)
 19,233,684 ( 0.25%)  ???:llvm::InstCombinerImpl::visitLoadInst(llvm::LoadInst&)
 18,971,819 ( 0.25%)  ???:SimplifyICmpInst(unsigned int, llvm::Value*, llvm::Value*, llvm::SimplifyQuery const&, unsigned int) [clone .llvm.1619516508949622737]
 18,148,847 ( 0.24%)  ???:llvm::LiveVariables::HandleRegMask(llvm::MachineOperand const&)
 18,080,432 ( 0.24%)  ???:llvm::PointerMayBeCaptured(llvm::Value const*, llvm::CaptureTracker*, unsigned int)
 18,034,668 ( 0.23%)  ???:llvm::coro::declaresIntrinsics(llvm::Module const&, std::initializer_list)
 17,502,212 ( 0.23%)  ???:llvm::AAResults::getModRefInfo(llvm::Instruction const*, llvm::Optional const&, llvm::AAQueryInfo&)
 17,146,456 ( 0.22%)  ???:llvm::SROA::rewritePartition(llvm::AllocaInst&, llvm::sroa::AllocaSlices&, llvm::sroa::Partition&)
 16,910,412 ( 0.22%)  ???:llvm::MachineInstr::addOperand(llvm::MachineFunction&, llvm::MachineOperand const&)
 16,476,060 ( 0.21%)  ./string/../sysdeps/x86_64/multiarch/strcmp-avx2.S:__strncmp_avx2
 16,226,115 ( 0.21%)  ???:(anonymous namespace)::CVPLatticeFunc::ComputeInstructionState(llvm::Instruction&, llvm::DenseMap, llvm::PointerIntPairInfo > >, (anonymous namespace)::CVPLatticeVal, llvm::DenseMapInfo, llvm::PointerIntPairInfo > > >, llvm::detail::DenseMapPair, llvm::PointerIntPairInfo > >, (anonymous namespace)::CVPLatticeVal> >&, llvm::SparseSolver, llvm::PointerIntPairInfo > >, (anonymous namespace)::CVPLatticeVal, llvm::LatticeKeyInfo, llvm::PointerIntPairInfo > > > >&)
 15,639,117 ( 0.20%)  ???:isKnownNonZero(llvm::Value const*, llvm::APInt const&, unsigned int, (anonymous namespace)::Query const&) [clone .llvm.15619146473165121143]
 15,345,083 ( 0.20%)  ???:llvm::SelectionDAG::computeKnownBits(llvm::SDValue, llvm::APInt const&, unsigned int) const
 15,227,407 ( 0.20%)  ???:llvm::SmallPtrSetImplBase::insert_imp_big(void const*)
 14,949,541 ( 0.19%)  ???:llvm::Type::getPrimitiveSizeInBits() const
 14,869,557 ( 0.19%)  ???:llvm::DAGTypeLegalizer::run()
 14,863,497 ( 0.19%)  ???:llvm::DomTreeBuilder::SemiNCAInfo >::FindRoots(llvm::DominatorTreeBase const&, llvm::DomTreeBuilder::SemiNCAInfo >::BatchUpdateInfo*)
 14,847,767 ( 0.19%)  ???:llvm::detail::PassModel>, llvm::PreservedAnalyses, llvm::AnalysisManager>::run(llvm::Function&, llvm::AnalysisManager&)
 14,778,696 ( 0.19%)  ???:llvm::DemandedBits::isInstructionDead(llvm::Instruction*)
 14,620,607 ( 0.19%)  ???:llvm::TargetLowering::SimplifyDemandedBits(llvm::SDValue, llvm::APInt const&, llvm::APInt const&, llvm::KnownBits&, llvm::TargetLowering::TargetLoweringOpt&, unsigned int, bool) const
 14,515,200 ( 0.19%)  ???:llvm::DomTreeBuilder::SemiNCAInfo >::CalculateFromScratch(llvm::DominatorTreeBase&, llvm::DomTreeBuilder::SemiNCAInfo >::BatchUpdateInfo*)
 14,297,777 ( 0.19%)  ???:(anonymous namespace)::SelectionDAGLegalize::LegalizeOp(llvm::SDNode*) [clone .llvm.8386621111310650999]
 14,282,453 ( 0.19%)  ???:llvm::SelectionDAG::getConstant(llvm::ConstantInt const&, llvm::SDLoc const&, llvm::EVT, bool, bool)
 14,129,110 ( 0.18%)  ???:(anonymous namespace)::SimplifyCFGOpt::simplifyCondBranch(llvm::BranchInst*, llvm::IRBuilder&)
 14,064,119 ( 0.18%)  ???:llvm::AttributeList::get(llvm::LLVMContext&, llvm::ArrayRef)
 14,060,494 ( 0.18%)  ???:llvm::Instruction::eraseFromParent()
 14,017,230 ( 0.18%)  ???:(anonymous namespace)::AggressiveDeadCodeElimination::performDeadCodeElimination()
 13,902,720 ( 0.18%)  ./string/../sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S:__memset_avx2_unaligned_erms
 13,880,533 ( 0.18%)  ???:llvm::APInt::initSlowCase(unsigned long, bool)
 13,573,588 ( 0.18%)  ???:(anonymous namespace)::LazyValueInfoImpl::getValueInBlock(llvm::Value*, llvm::BasicBlock*, llvm::Instruction*) [clone .llvm.4316243980339171764]
 13,528,769 ( 0.18%)  ???:llvm::InstCombinerImpl::visitBitCast(llvm::BitCastInst&)
 13,342,743 ( 0.17%)  ???:llvm::SelectionDAG::Legalize()
 13,280,322 ( 0.17%)  ???:updateCGAndAnalysisManagerForPass(llvm::LazyCallGraph&, llvm::LazyCallGraph::SCC&, llvm::LazyCallGraph::Node&, llvm::AnalysisManager&, llvm::CGSCCUpdateResult&, llvm::AnalysisManager&, bool) [clone .llvm.5426518467876156712]
 13,011,378 ( 0.17%)  ???:llvm::SelectionDAG::MorphNodeTo(llvm::SDNode*, unsigned int, llvm::SDVTList, llvm::ArrayRef)
 12,961,181 ( 0.17%)  ???:llvm::InstrEmitter::EmitMachineNode(llvm::SDNode*, bool, bool, llvm::DenseMap, llvm::detail::DenseMapPair >&)
 12,893,739 ( 0.17%)  ???:(anonymous namespace)::PruningFunctionCloner::CloneBlock(llvm::BasicBlock const*, llvm::ilist_iterator, false, true>, std::vector >&)
 12,878,020 ( 0.17%)  ???:???
 12,777,244 ( 0.17%)  ???:llvm::AAResults::Model::pointsToConstantMemory(llvm::MemoryLocation const&, llvm::AAQueryInfo&, bool)
 12,436,419 ( 0.16%)  ???:llvm::AAResults::Model::getModRefBehavior(llvm::CallBase const*)
 12,383,334 ( 0.16%)  ???:llvm::MemoryLocation::getOrNone(llvm::Instruction const*)
 12,328,447 ( 0.16%)  ???:llvm::BlockFrequencyInfoImpl::initializeRPOT()
 12,135,076 ( 0.16%)  ???:llvm::IDFCalculatorBase::calculate(llvm::SmallVectorImpl&)
 11,965,865 ( 0.16%)  ???:llvm::SCCPInstVisitor::solve()
 11,926,173 ( 0.16%)  ???:llvm::InstCombinerImpl::visitTrunc(llvm::TruncInst&)
 11,864,733 ( 0.15%)  ???:llvm::FoldingSetBase::FindNodeOrInsertPos(llvm::FoldingSetNodeID const&, void*&, llvm::FoldingSetBase::FoldingSetInfo const&)
 11,629,557 ( 0.15%)  ???:llvm::JumpThreadingPass::processBlock(llvm::BasicBlock*)
 11,460,917 ( 0.15%)  /usr/home/liquid/.cargo/registry/src/github.com-1ecc6299db9ec823/hashbrown-0.12.0/src/raw/mod.rs:, (), core::hash::BuildHasherDefault>>::from_hash::>::{closure#0}>
 11,452,194 ( 0.15%)  ???:llvm::Value::stripPointerCasts() const
 11,428,475 ( 0.15%)  ???:llvm::ReassociatePass::BuildRankMap(llvm::Function&, llvm::ReversePostOrderTraversal >&)
 11,295,860 ( 0.15%)  ???:llvm::PopulateLoopsDFS::traverse(llvm::BasicBlock*)
 11,282,014 ( 0.15%)  ???:llvm::Value::stripAndAccumulateConstantOffsets(llvm::DataLayout const&, llvm::APInt&, bool, llvm::function_ref) const
 11,265,354 ( 0.15%)  ???:llvm::getObjectSize(llvm::Value const*, unsigned long&, llvm::DataLayout const&, llvm::TargetLibraryInfo const*, llvm::ObjectSizeOpts)
 11,133,806 ( 0.14%)  ???:llvm::LoopInfoBase::analyze(llvm::DominatorTreeBase const&)
 11,063,760 ( 0.14%)  ???:computeKnownBits(llvm::Value const*, llvm::APInt const&, llvm::KnownBits&, unsigned int, (anonymous namespace)::Query const&)
 10,931,345 ( 0.14%)  ???:runImpl(llvm::Function&, llvm::LazyValueInfo*, llvm::DominatorTree*, llvm::SimplifyQuery const&) [clone .llvm.16011871802505272439]
 10,923,407 ( 0.14%)  ???:llvm::AttributeList::addAttribute(llvm::LLVMContext&, unsigned int, llvm::Attribute::AttrKind) const
 10,833,694 ( 0.14%)  ???:llvm::Instruction::~Instruction()
 10,655,615 ( 0.14%)  ???:llvm::BlockFrequencyInfoImpl::tryToComputeMassInFunction()
 10,495,255 ( 0.14%)  ???:llvm::BlockFrequencyInfoImplBase::finalizeMetrics()
 10,309,353 ( 0.13%)  ???:llvm::ReassociatePass::run(llvm::Function&, llvm::AnalysisManager&)
 10,309,248 ( 0.13%)  ./elf/dl-lookup.c:_dl_lookup_symbol_x
 10,283,344 ( 0.13%)  ???:llvm::PassRegistry::enumerateWith(llvm::PassRegistrationListener*)
 10,182,450 ( 0.13%)  ???:llvm::InstCombinerImpl::visitAllocSite(llvm::Instruction&)
  9,932,398 ( 0.13%)  ???:(anonymous namespace)::ScheduleDAGRRList::Schedule() [clone .llvm.6953762222372402862]
  9,846,114 ( 0.13%)  ./string/../sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S:__memcpy_sse2_unaligned_erms
  9,838,142 ( 0.13%)  ???:(anonymous namespace)::BitcodeReader::parseFunctionBody(llvm::Function*)
  9,814,042 ( 0.13%)  ???:llvm::raw_svector_ostream::write_impl(char const*, unsigned long)
  9,767,853 ( 0.13%)  ???:getAdjustedPtr(llvm::IRBuilder&, llvm::DataLayout const&, llvm::Value*, llvm::APInt, llvm::Type*, llvm::Twine const&)
  9,693,861 ( 0.13%)  ???:llvm::DomTreeBuilder::SemiNCAInfo >::runSemiNCA(llvm::DominatorTreeBase&, unsigned int)
  9,690,940 ( 0.13%)  ???:llvm::MemorySSA::OptimizeUses::optimizeUses()
  9,689,539 ( 0.13%)  ???:llvm::DenseMapBase, std::unique_ptr<(anonymous namespace)::LazyValueInfoCache::BlockCacheEntry, std::default_delete<(anonymous namespace)::LazyValueInfoCache::BlockCacheEntry> >, llvm::DenseMapInfo >, llvm::detail::DenseMapPair, std::unique_ptr<(anonymous namespace)::LazyValueInfoCache::BlockCacheEntry, std::default_delete<(anonymous namespace)::LazyValueInfoCache::BlockCacheEntry> > > >, llvm::PoisoningVH, std::unique_ptr<(anonymous namespace)::LazyValueInfoCache::BlockCacheEntry, std::default_delete<(anonymous namespace)::LazyValueInfoCache::BlockCacheEntry> >, llvm::DenseMapInfo >, llvm::detail::DenseMapPair, std::unique_ptr<(anonymous namespace)::LazyValueInfoCache::BlockCacheEntry, std::default_delete<(anonymous namespace)::LazyValueInfoCache::BlockCacheEntry> > > >::destroyAll() [clone .llvm.4316243980339171764]
  9,378,274 ( 0.12%)  ???:llvm::PMDataManager::removeNotPreservedAnalysis(llvm::Pass*)
  9,351,887 ( 0.12%)  ???:llvm::LivePhysRegs::addPristines(llvm::MachineFunction const&)
  9,288,422 ( 0.12%)  ???:llvm::ConstantFoldTerminator(llvm::BasicBlock*, bool, llvm::TargetLibraryInfo const*, llvm::DomTreeUpdater*)
  9,263,744 ( 0.12%)  ???:llvm::GEPOperator::accumulateConstantOffset(llvm::DataLayout const&, llvm::APInt&, llvm::function_ref) const
  9,174,269 ( 0.12%)  ???:collectBitParts(llvm::Value*, bool, bool, std::map, std::less, std::allocator > > >&, int, bool&)
  9,133,244 ( 0.12%)  ???:llvm::SROA::splitAlloca(llvm::AllocaInst&, llvm::sroa::AllocaSlices&)
  9,083,386 ( 0.12%)  ???:(anonymous namespace)::StackColoring::runOnMachineFunction(llvm::MachineFunction&)
  8,961,716 ( 0.12%)  ???:llvm::GVN::performPRE(llvm::Function&)
  8,917,375 ( 0.12%)  ???:llvm::ConstantRange::makeExactICmpRegion(llvm::CmpInst::Predicate, llvm::APInt const&)
  8,744,497 ( 0.11%)  ???:llvm::SelectionDAG::getNode(unsigned int, llvm::SDLoc const&, llvm::EVT, llvm::SDValue, llvm::SDValue, llvm::SDNodeFlags)
  8,666,438 ( 0.11%)  ???:llvm::FoldingSet::NodeEquals(llvm::FoldingSetBase const*, llvm::FoldingSetBase::Node*, llvm::FoldingSetNodeID const&, unsigned int, llvm::FoldingSetNodeID&)
  8,637,112 ( 0.11%)  ???:llvm::X86InstrInfo::analyzeBranch(llvm::MachineBasicBlock&, llvm::MachineBasicBlock*&, llvm::MachineBasicBlock*&, llvm::SmallVectorImpl&, bool) const
  8,592,301 ( 0.11%)  ./string/../sysdeps/x86_64/multiarch/strlen-avx2.S:__strlen_avx2
  8,422,206 ( 0.11%)  ???:(anonymous namespace)::RegisterCoalescer::joinCopy(llvm::MachineInstr*, bool&)
  8,347,346 ( 0.11%)  ???:std::back_insert_iterator > > std::__copy_move_a2, false, llvm::GraphTraits >, std::back_insert_iterator > > >(llvm::po_iterator, false, llvm::GraphTraits >, llvm::po_iterator, false, llvm::GraphTraits >, std::back_insert_iterator > >)
  8,278,149 ( 0.11%)  ???:llvm::DomTreeBuilder::SemiNCAInfo >::runSemiNCA(llvm::DominatorTreeBase&, unsigned int)
  8,178,740 ( 0.11%)  ???:llvm::isPotentiallyReachable(llvm::BasicBlock const*, llvm::BasicBlock const*, llvm::SmallPtrSetImpl const*, llvm::DominatorTree const*, llvm::LoopInfo const*)
  8,157,851 ( 0.11%)  ???:(anonymous namespace)::DAGCombiner::visitSTORE(llvm::SDNode*)
  8,136,200 ( 0.11%)  ???:llvm::KnownBits::computeForAddSub(bool, bool, llvm::KnownBits const&, llvm::KnownBits)
  7,991,622 ( 0.10%)  ???:llvm::ScheduleDAGSDNodes::AddSchedEdges()
  7,980,790 ( 0.10%)  ???:llvm::DomTreeBuilder::SemiNCAInfo >::CalculateFromScratch(llvm::DominatorTreeBase&, llvm::DomTreeBuilder::SemiNCAInfo >::BatchUpdateInfo*)
  7,866,643 ( 0.10%)  ???:(anonymous namespace)::CodeGenPrepare::optimizeInst(llvm::Instruction*, bool&)
  7,830,523 ( 0.10%)  ???:llvm::ReachingDefAnalysis::enterBasicBlock(llvm::MachineBasicBlock*)
  7,706,613 ( 0.10%)  ???:llvm::FoldBranchToCommonDest(llvm::BranchInst*, llvm::DomTreeUpdater*, llvm::MemorySSAUpdater*, llvm::TargetTransformInfo const*, unsigned int)
  7,700,992 ( 0.10%)  ???:llvm::Twine::printOneChild(llvm::raw_ostream&, llvm::Twine::Child, llvm::Twine::NodeKind) const
  7,694,960 ( 0.10%)  ???:llvm::X86TargetMachine::getSubtargetImpl(llvm::Function const&) const

--------------------------------------------------------------------------------
-- Auto-annotated source: /usr/home/liquid/.cargo/registry/src/github.com-1ecc6299db9ec823/hashbrown-0.12.0/src/raw/mod.rs
--------------------------------------------------------------------------------
Ir

-- line 111 ----------------------------------------
.  const EMPTY: u8 = 0b1111_1111;
.
.  /// Control byte value for a deleted bucket.
.  const DELETED: u8 = 0b1000_0000;
.
.  /// Checks whether a control byte represents a full bucket (top bit is clear).
.  #[inline]
.  fn is_full(ctrl: u8) -> bool {
1,240,394 ( 0.02%)      ctrl & 0x80 == 0
.  }
.
.  /// Checks whether a control byte represents a special value (top bit is set).
.  #[inline]
.  fn is_special(ctrl: u8) -> bool {
.      ctrl & 0x80 != 0
.  }
.
.  /// Checks whether a special control value is EMPTY (just check 1 bit).
.  #[inline]
.  fn special_is_empty(ctrl: u8) -> bool {
.      debug_assert!(is_special(ctrl));
97,751 ( 0.00%)      ctrl & 0x01 != 0
.  }
.
.  /// Primary hash function, used to select the initial bucket to probe from.
.  #[inline]
.  #[allow(clippy::cast_possible_truncation)]
.  fn h1(hash: u64) -> usize {
.      // On 32-bit platforms we simply ignore the higher hash bits.
.      hash as usize
-- line 140 ----------------------------------------
-- line 143 ----------------------------------------
.  /// Secondary hash function, saved in the low 7 bits of the control byte.
.  #[inline]
.  #[allow(clippy::cast_possible_truncation)]
.  fn h2(hash: u64) -> u8 {
.      // Grab the top 7 bits of the hash. While the hash is normally a full 64-bit
.      // value, some hash functions (such as FxHash) produce a usize result
.      // instead, which means that the top 32 bits are 0 on 32-bit platforms.
.      let hash_len = usize::min(mem::size_of::(), mem::size_of::());
26,239,745 ( 0.34%)      let top7 = hash >> (hash_len * 8 - 7);
.      (top7 & 0x7f) as u8 // truncation
.  }
.
.  /// Probe sequence based on triangular numbers, which is guaranteed (since our
.  /// table size is a power of two) to visit every group of elements exactly once.
.  ///
.  /// A triangular probe has us jump by 1 more group every time. So first we
.  /// jump by 1 group (meaning we just continue our linear scan), then 2 groups
-- line 159 ----------------------------------------
-- line 170 ----------------------------------------
.      #[inline]
.      fn move_next(&mut self, bucket_mask: usize) {
.          // We should have found an empty bucket by now and ended the probe.
.          debug_assert!(
.              self.stride <= bucket_mask,
.              "Went past end of probe sequence"
.          );
.
152,770 ( 0.00%)          self.stride += Group::WIDTH;
152,770 ( 0.00%)          self.pos += self.stride;
127,952 ( 0.00%)          self.pos &= bucket_mask;
.      }
.  }
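--------------------------------------------------------------------------------
-- The hash-splitting and probing scheme annotated above, condensed into a
-- stand-alone sketch (illustrative only, not hashbrown's exact code; the
-- 64-bit shift and the group width of 16 are assumptions of this sketch):
--------------------------------------------------------------------------------
/// h1 picks the starting group; h2 is the 7-bit fingerprint kept in the
/// control byte.
fn h1(hash: u64) -> usize {
    hash as usize
}

fn h2(hash: u64) -> u8 {
    ((hash >> 57) & 0x7f) as u8 // top 7 bits on a 64-bit platform
}

/// Triangular probing: strides of 1, 2, 3, ... groups visit every group of
/// a power-of-two table exactly once.
fn probe_positions(hash: u64, bucket_mask: usize) -> impl Iterator<Item = usize> {
    const GROUP_WIDTH: usize = 16;
    let mut pos = h1(hash) & bucket_mask;
    let mut stride = 0;
    std::iter::from_fn(move || {
        let current = pos;
        stride += GROUP_WIDTH;
        pos = (pos + stride) & bucket_mask;
        Some(current)
    })
}
--------------------------------------------------------------------------------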
.
.  /// Returns the number of buckets needed to hold the given number of items,
.  /// taking the maximum load factor into account.
.  ///
.  /// Returns `None` if an overflow occurs.
.  // Workaround for emscripten bug emscripten-core/emscripten-fastcomp#258
.  #[cfg_attr(target_os = "emscripten", inline(never))]
.  #[cfg_attr(not(target_os = "emscripten"), inline)]
.  fn capacity_to_buckets(cap: usize) -> Option {
.      debug_assert_ne!(cap, 0);
.
.      // For small tables we require at least 1 empty bucket so that lookups are
.      // guaranteed to terminate if an element doesn't exist in the table.
80,576 ( 0.00%)      if cap < 8 {
.          // We don't bother with a table size of 2 buckets since that can only
.          // hold a single element. Instead we skip directly to a 4 bucket table
.          // which can hold 3 elements.
161,855 ( 0.00%)          return Some(if cap < 4 { 4 } else { 8 });
.      }
.
.      // Otherwise require 1/8 buckets to be empty (87.5% load)
.      //
.      // Be careful when modifying this, calculate_layout relies on the
.      // overflow check here.
47,502 ( 0.00%)      let adjusted_cap = cap.checked_mul(8)? / 7;
.
.      // Any overflows will have been caught by the checked_mul. Also, any
.      // rounding errors from the division above will be cleaned up by
.      // next_power_of_two (which can't overflow because of the previous division).
.      Some(adjusted_cap.next_power_of_two())
.  }
.
.  /// Returns the maximum effective capacity for the given bucket mask, taking
.  /// the maximum load factor into account.
.  #[inline]
.  fn bucket_mask_to_capacity(bucket_mask: usize) -> usize {
225,477 ( 0.00%)      if bucket_mask < 8 {
.          // For tables with 1/2/4/8 buckets, we always reserve one empty slot.
.          // Keep in mind that the bucket mask is one less than the bucket count.
.          bucket_mask
.      } else {
.          // For larger tables we reserve 12.5% of the slots as empty.
76,270 ( 0.00%)          ((bucket_mask + 1) / 8) * 7
.      }
.  }
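--------------------------------------------------------------------------------
-- The two helpers above are inverses up to rounding. A runnable sketch of the
-- 7/8 load-factor arithmetic with a worked example (it mirrors the annotated
-- math; it is not the library's public API):
--------------------------------------------------------------------------------
fn capacity_to_buckets(cap: usize) -> Option<usize> {
    if cap < 8 {
        // A 4-bucket table holds 3 items, an 8-bucket table holds 7.
        return Some(if cap < 4 { 4 } else { 8 });
    }
    // Keep 1/8 of the buckets empty (87.5% maximum load).
    let adjusted_cap = cap.checked_mul(8)? / 7;
    Some(adjusted_cap.next_power_of_two())
}

fn bucket_mask_to_capacity(bucket_mask: usize) -> usize {
    if bucket_mask < 8 {
        bucket_mask // tiny tables always keep one slot free
    } else {
        ((bucket_mask + 1) / 8) * 7
    }
}

fn main() {
    // 100 items: 100 * 8 / 7 = 114, rounded up to 128 buckets,
    // which in turn advertise a capacity of 128 / 8 * 7 = 112.
    assert_eq!(capacity_to_buckets(100), Some(128));
    assert_eq!(bucket_mask_to_capacity(127), 112);
}
--------------------------------------------------------------------------------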
.
.  /// Helper which allows the max calculation for ctrl_align to be statically computed for each T
.  /// while keeping the rest of `calculate_layout_for` independent of `T`
.  #[derive(Copy, Clone)]
.  struct TableLayout {
.      size: usize,
-- line 233 ----------------------------------------
-- line 246 ----------------------------------------
.
.      #[inline]
.      fn calculate_layout_for(self, buckets: usize) -> Option<(Layout, usize)> {
.          debug_assert!(buckets.is_power_of_two());
.
.          let TableLayout { size, ctrl_align } = self;
.          // Manual layout calculation since Layout methods are not yet stable.
.          let ctrl_offset =
166,130 ( 0.00%)              size.checked_mul(buckets)?.checked_add(ctrl_align - 1)? & !(ctrl_align - 1);
219,822 ( 0.00%)          let len = ctrl_offset.checked_add(buckets + Group::WIDTH)?;
.
.          Some((
.              unsafe { Layout::from_size_align_unchecked(len, ctrl_align) },
.              ctrl_offset,
.          ))
.      }
.  }
.
-- line 263 ----------------------------------------
-- line 337 ----------------------------------------
.          }
.      }
.      #[cfg_attr(feature = "inline-more", inline)]
.      pub unsafe fn drop(&self) {
.          self.as_ptr().drop_in_place();
.      }
.      #[inline]
.      pub unsafe fn read(&self) -> T {
1,748 ( 0.00%)          self.as_ptr().read()
.      }
.      #[inline]
.      pub unsafe fn write(&self, val: T) {
.          self.as_ptr().write(val);
.      }
.      #[inline]
.      pub unsafe fn as_ref<'a>(&self) -> &'a T {
.          &*self.as_ptr()
-- line 353 ----------------------------------------
-- line 422 ----------------------------------------
.      /// Creates a new empty hash table without allocating any memory, using the
.      /// given allocator.
.      ///
.      /// In effect this returns a table with exactly 1 bucket. However we can
.      /// leave the data pointer dangling since that bucket is never written to
.      /// due to our load factor forcing us to always have at least 1 free bucket.
.      #[inline]
.      pub fn new_in(alloc: A) -> Self {
1,560 ( 0.00%)          Self {
.              table: RawTableInner::new_in(alloc),
.              marker: PhantomData,
.          }
.      }
.
.      /// Allocates a new hash table with the given number of buckets.
.      ///
.      /// The control bytes are left uninitialized.
-- line 438 ----------------------------------------
-- line 440 ----------------------------------------
.      unsafe fn new_uninitialized(
.          alloc: A,
.          buckets: usize,
.          fallibility: Fallibility,
.      ) -> Result {
.          debug_assert!(buckets.is_power_of_two());
.
.          Ok(Self {
60 ( 0.00%)              table: RawTableInner::new_uninitialized(
.                  alloc,
.                  TableLayout::new::(),
.                  buckets,
.                  fallibility,
.              )?,
.              marker: PhantomData,
.          })
.      }
-- line 456 ----------------------------------------
-- line 458 ----------------------------------------
.      /// Attempts to allocate a new hash table with at least enough capacity
.      /// for inserting the given number of elements without reallocating.
.      fn fallible_with_capacity(
.          alloc: A,
.          capacity: usize,
.          fallibility: Fallibility,
.      ) -> Result {
.          Ok(Self {
5,116 ( 0.00%)              table: RawTableInner::fallible_with_capacity(
.                  alloc,
.                  TableLayout::new::(),
.                  capacity,
.                  fallibility,
.              )?,
.              marker: PhantomData,
.          })
.      }
-- line 474 ----------------------------------------
-- line 527 ----------------------------------------
.          debug_assert_ne!(self.table.bucket_mask, 0);
.          debug_assert!(index < self.buckets());
.          Bucket::from_base_index(self.data_end(), index)
.      }
.
.      /// Erases an element from the table without dropping it.
.      #[cfg_attr(feature = "inline-more", inline)]
.      #[deprecated(since = "0.8.1", note = "use erase or remove instead")]
15,390 ( 0.00%)      pub unsafe fn erase_no_drop(&mut self, item: &Bucket) {
15,390 ( 0.00%)          let index = self.bucket_index(item);
.          self.table.erase(index);
30,780 ( 0.00%)      }
.
.      /// Erases an element from the table, dropping it in place.
.      #[cfg_attr(feature = "inline-more", inline)]
.      #[allow(clippy::needless_pass_by_value)]
.      #[allow(deprecated)]
.      pub unsafe fn erase(&mut self, item: Bucket) {
.          // Erase the element from the table first since drop might panic.
11,416 ( 0.00%)          self.erase_no_drop(&item);
.          item.drop();
.      }
.
.      /// Finds and erases an element from the table, dropping it in place.
.      /// Returns true if an element was found.
.      #[cfg(feature = "raw")]
.      #[cfg_attr(feature = "inline-more", inline)]
.      pub fn erase_entry(&mut self, hash: u64, eq: impl FnMut(&T) -> bool) -> bool {
-- line 554 ----------------------------------------
-- line 563 ----------------------------------------
.          }
.      }
.
.      /// Removes an element from the table, returning it.
.      #[cfg_attr(feature = "inline-more", inline)]
.      #[allow(clippy::needless_pass_by_value)]
.      #[allow(deprecated)]
.      pub unsafe fn remove(&mut self, item: Bucket) -> T {
19,364 ( 0.00%)          self.erase_no_drop(&item);
40 ( 0.00%)          item.read()
.      }
.
.      /// Finds and removes an element from the table, returning it.
.      #[cfg_attr(feature = "inline-more", inline)]
423,536 ( 0.01%)      pub fn remove_entry(&mut self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option {
.          // Avoid `Option::map` because it bloats LLVM IR.
1,324 ( 0.00%)          match self.find(hash, eq) {
7,955 ( 0.00%)              Some(bucket) => Some(unsafe { self.remove(bucket) }),
70,171 ( 0.00%)              None => None,
.          }
569,649 ( 0.01%)      }
.
.      /// Marks all table buckets as empty without dropping their contents.
.      #[cfg_attr(feature = "inline-more", inline)]
.      pub fn clear_no_drop(&mut self) {
.          self.table.clear_no_drop();
.      }
.
.      /// Removes all elements from the table without freeing the backing memory.
.      #[cfg_attr(feature = "inline-more", inline)]
.      pub fn clear(&mut self) {
.          // Ensure that the table is reset even if one of the drops panic
.          let mut self_ = guard(self, |self_| self_.clear_no_drop());
.          unsafe {
1 ( 0.00%)              self_.drop_elements();
.          }
.      }
.
7 ( 0.00%)      unsafe fn drop_elements(&mut self) {
6,190 ( 0.00%)          if mem::needs_drop::() && !self.is_empty() {
.              for item in self.iter() {
.                  item.drop();
.              }
.          }
8 ( 0.00%)      }
.
.      /// Shrinks the table to fit `max(self.len(), min_size)` elements.
.      #[cfg_attr(feature = "inline-more", inline)]
.      pub fn shrink_to(&mut self, min_size: usize, hasher: impl Fn(&T) -> u64) {
.          // Calculate the minimal number of elements that we need to reserve
.          // space for.
.          let min_size = usize::max(self.table.items, min_size);
.          if min_size == 0 {
-- line 615 ----------------------------------------
-- line 642 ----------------------------------------
.          }
.      }
.  }
.
.  /// Ensures that at least `additional` items can be inserted into the table
.  /// without reallocation.
.  #[cfg_attr(feature = "inline-more", inline)]
.  pub fn reserve(&mut self, additional: usize, hasher: impl Fn(&T) -> u64) {
258,363 ( 0.00%)      if additional > self.table.growth_left {
.          // Avoid `Result::unwrap_or_else` because it bloats LLVM IR.
146,409 ( 0.00%)          if self
.              .reserve_rehash(additional, hasher, Fallibility::Infallible)
.              .is_err()
.          {
.              unsafe { hint::unreachable_unchecked() }
.          }
.      }
.  }
.
-- line 660 ----------------------------------------
-- line 671 ----------------------------------------
.          } else {
.              Ok(())
.          }
.      }
.
.      /// Out-of-line slow path for `reserve` and `try_reserve`.
.      #[cold]
.      #[inline(never)]
324,246 ( 0.00%)      fn reserve_rehash(
.          &mut self,
.          additional: usize,
.          hasher: impl Fn(&T) -> u64,
.          fallibility: Fallibility,
.      ) -> Result<(), TryReserveError> {
.          unsafe {
.              self.table.reserve_rehash_inner(
.                  additional,
-- line 687 ----------------------------------------
-- line 690 ----------------------------------------
.                  TableLayout::new::(),
.                  if mem::needs_drop::() {
.                      Some(mem::transmute(ptr::drop_in_place:: as unsafe fn(*mut T)))
.                  } else {
.                      None
.                  },
.              )
.          }
219,928 ( 0.00%)      }
.
.      /// Allocates a new table of a different size and moves the contents of the
.      /// current table into it.
.      fn resize(
.          &mut self,
.          capacity: usize,
.          hasher: impl Fn(&T) -> u64,
.          fallibility: Fallibility,
-- line 706 ----------------------------------------
-- line 714 ----------------------------------------
.          )
.      }
.  }
.
.      /// Inserts a new element into the table, and returns its raw bucket.
.      ///
.      /// This does not check if the given element already exists in the table.
.      #[cfg_attr(feature = "inline-more", inline)]
1,691,634 ( 0.02%)      pub fn insert(&mut self, hash: u64, value: T, hasher: impl Fn(&T) -> u64) -> Bucket {
.          unsafe {
.              let mut index = self.table.find_insert_slot(hash);
.
.              // We can avoid growing the table once we have reached our load
.              // factor if we are replacing a tombstone. This works since the
.              // number of EMPTY slots does not change in this case.
2,852 ( 0.00%)              let old_ctrl = *self.table.ctrl(index);
1,179,175 ( 0.02%)              if unlikely(self.table.growth_left == 0 && special_is_empty(old_ctrl)) {
.                  self.reserve(1, hasher);
.                  index = self.table.find_insert_slot(hash);
.              }
.
.              self.table.record_item_insert_at(index, old_ctrl, hash);
.
.              let bucket = self.bucket(index);
4 ( 0.00%)              bucket.write(value);
.              bucket
.          }
1,249,607 ( 0.02%)      }
.
.      /// Attempts to insert a new element without growing the table and return its raw bucket.
.      ///
.      /// Returns an `Err` containing the given element if inserting it would require growing the
.      /// table.
.      ///
.      /// This does not check if the given element already exists in the table.
.      #[cfg(feature = "raw")]
-- line 749 ----------------------------------------
-- line 760 ----------------------------------------
.          }
.      }
.  }
.
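--------------------------------------------------------------------------------
-- The heavily-hit branch in insert() above (1,179,175 Ir) encodes the
-- tombstone-reuse rule. Schematically (a sketch of the condition only, not
-- the real method; EMPTY as defined at the top of the file):
--------------------------------------------------------------------------------
/// Replacing a DELETED slot (a tombstone) does not consume an EMPTY slot,
/// so the table only needs to grow when the chosen slot was EMPTY and the
/// growth budget is already exhausted.
fn must_grow_before_insert(growth_left: usize, old_ctrl: u8) -> bool {
    const EMPTY: u8 = 0b1111_1111;
    growth_left == 0 && old_ctrl == EMPTY
}
--------------------------------------------------------------------------------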
.      /// Inserts a new element into the table, and returns a mutable reference to it.
.      ///
.      /// This does not check if the given element already exists in the table.
.      #[cfg_attr(feature = "inline-more", inline)]
432,808 ( 0.01%)      pub fn insert_entry(&mut self, hash: u64, value: T, hasher: impl Fn(&T) -> u64) -> &mut T {
187 ( 0.00%)          unsafe { self.insert(hash, value, hasher).as_mut() }
324,606 ( 0.00%)      }
.
.      /// Inserts a new element into the table, without growing the table.
.      ///
.      /// There must be enough space in the table to insert the new element.
.      ///
.      /// This does not check if the given element already exists in the table.
.      #[cfg_attr(feature = "inline-more", inline)]
.      #[cfg(any(feature = "raw", feature = "rustc-internal-api"))]
5,956 ( 0.00%)      pub unsafe fn insert_no_grow(&mut self, hash: u64, value: T) -> Bucket {
262,822 ( 0.00%)          let (index, old_ctrl) = self.table.prepare_insert_slot(hash);
9,402 ( 0.00%)          let bucket = self.table.bucket(index);
.
.          // If we are replacing a DELETED entry then we don't need to update
.          // the load counter.
563,582 ( 0.01%)          self.table.growth_left -= special_is_empty(old_ctrl) as usize;
.
.          bucket.write(value);
450,086 ( 0.01%)          self.table.items += 1;
.          bucket
11,751 ( 0.00%)      }
.
.      /// Temporary removes a bucket, applying the given function to the removed
.      /// element and optionally put back the returned value in the same bucket.
.      ///
.      /// Returns `true` if the bucket still contains an element
.      ///
.      /// This does not check if the given bucket is actually occupied.
.      #[cfg_attr(feature = "inline-more", inline)]
-- line 798 ----------------------------------------
-- line 813 ----------------------------------------
.              true
.          } else {
.              false
.          }
.      }
.
.      /// Searches for an element in the table.
.      #[inline]
49,318 ( 0.00%)      pub fn find(&self, hash: u64, mut eq: impl FnMut(&T) -> bool) -> Option> {
7,436 ( 0.00%)          let result = self.table.find_inner(hash, &mut |index| unsafe {
17,297 ( 0.00%)              eq(self.bucket(index).as_ref())
2,699 ( 0.00%)          });
.
.          // Avoid `Option::map` because it bloats LLVM IR.
.          match result {
1,083 ( 0.00%)              Some(index) => Some(unsafe { self.bucket(index) }),
.              None => None,
.          }
56,184 ( 0.00%)      }
.
.      /// Gets a reference to an element in the table.
.      #[inline]
.      pub fn get(&self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option<&T> {
.          // Avoid `Option::map` because it bloats LLVM IR.
20,273 ( 0.00%)          match self.find(hash, eq) {
.              Some(bucket) => Some(unsafe { bucket.as_ref() }),
.              None => None,
.          }
.      }
.
.      /// Gets a mutable reference to an element in the table.
.      #[inline]
2,114 ( 0.00%)      pub fn get_mut(&mut self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option<&mut T> {
.          // Avoid `Option::map` because it bloats LLVM IR.
23,375 ( 0.00%)          match self.find(hash, eq) {
.              Some(bucket) => Some(unsafe { bucket.as_mut() }),
.              None => None,
.          }
2,416 ( 0.00%)      }
.
.      /// Attempts to get mutable references to `N` entries in the table at once.
.      ///
.      /// Returns an array of length `N` with the results of each query.
.      ///
.      /// At most one mutable reference will be returned to any entry. `None` will be returned if any
.      /// of the hashes are duplicates. `None` will be returned if the hash is not found.
.      ///
-- line 859 ----------------------------------------
-- line 920 ----------------------------------------
.      #[inline]
.      pub fn len(&self) -> usize {
.          self.table.items
.      }
.
.      /// Returns `true` if the table contains no elements.
.      #[inline]
.      pub fn is_empty(&self) -> bool {
615,078 ( 0.01%)          self.len() == 0
.      }
.
.      /// Returns the number of buckets in the table.
.      #[inline]
.      pub fn buckets(&self) -> usize {
.          self.table.bucket_mask + 1
.      }
.
.      /// Returns an iterator over every element in the table. It is up to
.      /// the caller to ensure that the `RawTable` outlives the `RawIter`.
.      /// Because we cannot make the `next` method unsafe on the `RawIter`
.      /// struct, we have to make the `iter` method unsafe.
.      #[inline]
.      pub unsafe fn iter(&self) -> RawIter {
1 ( 0.00%)          let data = Bucket::from_base_index(self.data_end(), 0);
.          RawIter {
.              iter: RawIterRange::new(self.table.ctrl.as_ptr(), data, self.table.buckets()),
57,251 ( 0.00%)              items: self.table.items,
.          }
.      }
.
.      /// Returns an iterator over occupied buckets that could match a given hash.
.      ///
.      /// `RawTable` only stores 7 bits of the hash value, so this iterator may
.      /// return items that have a hash value different than the one provided. You
.      /// should always validate the returned values before using them.
-- line 954 ----------------------------------------
-- line 995 ----------------------------------------
.      /// Iteration starts at the provided iterator's current location.
.      ///
.      /// It is up to the caller to ensure that the iterator is valid for this
.      /// `RawTable` and covers all items that remain in the table.
.      pub unsafe fn into_iter_from(self, iter: RawIter) -> RawIntoIter {
.          debug_assert_eq!(iter.len(), self.len());
.
.          let alloc = self.table.alloc.clone();
2,344 ( 0.00%)          let allocation = self.into_allocation();
1,758 ( 0.00%)          RawIntoIter {
2,930 ( 0.00%)              iter,
.              allocation,
.              marker: PhantomData,
.              alloc,
.          }
.      }
.
.      /// Converts the table into a raw allocation. The contents of the table
.      /// should be dropped using a `RawIter` before freeing the allocation.
.      #[cfg_attr(feature = "inline-more", inline)]
.      pub(crate) fn into_allocation(self) -> Option<(NonNull, Layout)> {
1,107 ( 0.00%)          let alloc = if self.table.is_empty_singleton() {
.              None
.          } else {
.              // Avoid `Option::unwrap_or_else` because it bloats LLVM IR.
.              let (layout, ctrl_offset) = match calculate_layout::(self.table.buckets()) {
.                  Some(lco) => lco,
.                  None => unsafe { hint::unreachable_unchecked() },
.              };
.              Some((
210 ( 0.00%)                  unsafe { NonNull::new_unchecked(self.table.ctrl.as_ptr().sub(ctrl_offset)) },
.                  layout,
.              ))
.          };
.          mem::forget(self);
.          alloc
.      }
.  }
.
-- line 1033 ----------------------------------------
-- line 1042 ----------------------------------------
.      T: Sync,
.      A: Sync,
.  {
.  }
.
.  impl RawTableInner {
.      #[inline]
.      const fn new_in(alloc: A) -> Self {
386,308 ( 0.01%)          Self {
.              // Be careful to cast the entire slice to a raw pointer.
.              ctrl: unsafe { NonNull::new_unchecked(Group::static_empty() as *const _ as *mut u8) },
.              bucket_mask: 0,
.              items: 0,
.              growth_left: 0,
.              alloc,
.          }
.      }
.  }
.
.  impl RawTableInner {
.      #[cfg_attr(feature = "inline-more", inline)]
290,019 ( 0.00%)      unsafe fn new_uninitialized(
.          alloc: A,
.          table_layout: TableLayout,
.          buckets: usize,
.          fallibility: Fallibility,
.      ) -> Result {
.          debug_assert!(buckets.is_power_of_two());
.
.          // Avoid `Option::ok_or_else` because it bloats LLVM IR.
-- line 1071 ----------------------------------------
-- line 1078 ----------------------------------------
.          // exceed `isize::MAX`. We can skip this check on 64-bit systems since
.          // such allocations will never succeed anyways.
.          //
.          // This mirrors what Vec does in the standard library.
.          if mem::size_of::() < 8 && layout.size() > isize::MAX as usize {
.              return Err(fallibility.capacity_overflow());
.          }
.
70,872 ( 0.00%)          let ptr: NonNull = match do_alloc(&alloc, layout) {
.              Ok(block) => block.cast(),
.              Err(_) => return Err(fallibility.alloc_err(layout)),
.          };
.
.          let ctrl = NonNull::new_unchecked(ptr.as_ptr().add(ctrl_offset));
161,064 ( 0.00%)          Ok(Self {
.              ctrl,
67,144 ( 0.00%)              bucket_mask: buckets - 1,
.              items: 0,
.              growth_left: bucket_mask_to_capacity(buckets - 1),
.              alloc,
.          })
211,242 ( 0.00%)      }
.
.      #[inline]
21,580 ( 0.00%)      fn fallible_with_capacity(
.          alloc: A,
.          table_layout: TableLayout,
.          capacity: usize,
.          fallibility: Fallibility,
.      ) -> Result {
5,488 ( 0.00%)          if capacity == 0 {
2,297 ( 0.00%)              Ok(Self::new_in(alloc))
.          } else {
.              unsafe {
.                  let buckets =
.                      capacity_to_buckets(capacity).ok_or_else(|| fallibility.capacity_overflow())?;
.
131,925 ( 0.00%)                  let result = Self::new_uninitialized(alloc, table_layout, buckets, fallibility)?;
.                  result.ctrl(0).write_bytes(EMPTY, result.num_ctrl_bytes());
.
13,560 ( 0.00%)                  Ok(result)
.              }
.          }
21,580 ( 0.00%)      }
.
.      /// Searches for an empty or deleted bucket which is suitable for inserting
.      /// a new element and sets the hash for that slot.
.      ///
.      /// There must be at least 1 empty bucket in the table.
.      #[inline]
91,917 ( 0.00%)      unsafe fn prepare_insert_slot(&self, hash: u64) -> (usize, u8) {
.          let index = self.find_insert_slot(hash);
91,917 ( 0.00%)          let old_ctrl = *self.ctrl(index);
.          self.set_ctrl_h2(index, hash);
.          (index, old_ctrl)
183,834 ( 0.00%)      }
.
.      /// Searches for an empty or deleted bucket which is suitable for inserting
.      /// a new element.
.      ///
.      /// There must be at least 1 empty bucket in the table.
.      #[inline]
.      fn find_insert_slot(&self, hash: u64) -> usize {
.          let mut probe_seq = self.probe_seq(hash);
.          loop {
.              unsafe {
.                  let group = Group::load(self.ctrl(probe_seq.pos));
808,211 ( 0.01%)                  if let Some(bit) = group.match_empty_or_deleted().lowest_set_bit() {
2,361,821 ( 0.03%)                      let result = (probe_seq.pos + bit) & self.bucket_mask;
.
.                      // In tables smaller than the group width, trailing control
.                      // bytes outside the range of the table are filled with
.                      // EMPTY entries. These will unfortunately trigger a
.                      // match, but once masked may point to a full bucket that
.                      // is already occupied. We detect this situation here and
.                      // perform a second scan starting at the beginning of the
.                      // table. This second scan is guaranteed to find an empty
.                      // slot (due to the load factor) before hitting the trailing
.                      // control bytes (containing EMPTY).
1,081,508 ( 0.01%)                      if unlikely(is_full(*self.ctrl(result))) {
.                          debug_assert!(self.bucket_mask < Group::WIDTH);
.                          debug_assert_ne!(probe_seq.pos, 0);
.                          return Group::load_aligned(self.ctrl(0))
.                              .match_empty_or_deleted()
.                              .lowest_set_bit_nonzero();
.                      }
.
.                      return result;
-- line 1165 ----------------------------------------
-- line 1171 ----------------------------------------
.
.      /// Searches for an element in the table. This uses dynamic dispatch to reduce the amount of
.      /// code generated, but it is eliminated by LLVM optimizations.
.      #[inline]
.      fn find_inner(&self, hash: u64, eq: &mut dyn FnMut(usize) -> bool) -> Option {
.          let h2_hash = h2(hash);
.          let mut probe_seq = self.probe_seq(hash);
.
85,232 ( 0.00%)          loop {
.              let group = unsafe { Group::load(self.ctrl(probe_seq.pos)) };
.
2,680,285 ( 0.03%)              for bit in group.match_byte(h2_hash) {
5,375,546 ( 0.07%)                  let index = (probe_seq.pos + bit) & self.bucket_mask;
.
3,741,759 ( 0.05%)                  if likely(eq(index)) {
.                      return Some(index);
.                  }
.              }
.
783,906 ( 0.01%)              if likely(group.match_empty().any_bit_set()) {
.                  return None;
.              }
.
.              probe_seq.move_next(self.bucket_mask);
.          }
.      }
.
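--------------------------------------------------------------------------------
-- Most lookup time above sits in the group-match loop of find_inner (the
-- index computation alone costs 5,375,546 Ir). A portable SWAR sketch of the
-- byte-broadcast idea behind Group::match_byte (hashbrown's generic fallback
-- is in this spirit; its SIMD path uses byte-compare instructions instead):
--------------------------------------------------------------------------------
/// Returns a mask with the high bit set in every lane of `group` that may
/// equal `byte`. Rare lane patterns can produce false positives, which is
/// acceptable because each candidate index is re-checked against the full
/// key anyway (the `eq(index)` call above).
fn match_byte(group: u64, byte: u8) -> u64 {
    let broadcast = u64::from_ne_bytes([byte; 8]); // copy `byte` into all 8 lanes
    let x = group ^ broadcast;                     // matching lanes become zero
    // Classic zero-byte detector: a lane flags iff the subtraction borrows
    // out of it while its own high bit was clear.
    x.wrapping_sub(0x0101_0101_0101_0101) & !x & 0x8080_8080_8080_8080
}
--------------------------------------------------------------------------------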
.      #[allow(clippy::mut_mut)]
-- line 1198 ----------------------------------------
-- line 1225 ----------------------------------------
.          Bucket::from_base_index(self.data_end(), index)
.      }
.
.      #[inline]
.      unsafe fn bucket_ptr(&self, index: usize, size_of: usize) -> *mut u8 {
.          debug_assert_ne!(self.bucket_mask, 0);
.          debug_assert!(index < self.buckets());
.          let base: *mut u8 = self.data_end().as_ptr();
2,963,320 ( 0.04%)          base.sub((index + 1) * size_of)
.      }
.
.      #[inline]
.      unsafe fn data_end(&self) -> NonNull {
.          NonNull::new_unchecked(self.ctrl.as_ptr().cast())
.      }
.
.      /// Returns an iterator-like object for a probe sequence on the table.
.      ///
.      /// This iterator never terminates, but is guaranteed to visit each bucket
.      /// group exactly once. The loop using `probe_seq` must terminate upon
.      /// reaching a group containing an empty bucket.
.      #[inline]
.      fn probe_seq(&self, hash: u64) -> ProbeSeq {
.          ProbeSeq {
14,969,822 ( 0.19%)              pos: h1(hash) & self.bucket_mask,
.              stride: 0,
.          }
.      }
.
.      /// Returns the index of a bucket for which a value must be inserted if there is enough rooom
.      /// in the table, otherwise returns error
.      #[cfg(feature = "raw")]
.      #[inline]
-- line 1257 ----------------------------------------
-- line 1263 ----------------------------------------
.          } else {
.              self.record_item_insert_at(index, old_ctrl, hash);
.              Ok(index)
.          }
.      }
.
.      #[inline]
.      unsafe fn record_item_insert_at(&mut self, index: usize, old_ctrl: u8, hash: u64) {
1,473,122 ( 0.02%)          self.growth_left -= special_is_empty(old_ctrl) as usize;
.          self.set_ctrl_h2(index, hash);
1,178,460 ( 0.02%)          self.items += 1;
.      }
.
.      #[inline]
.      fn is_in_same_group(&self, i: usize, new_i: usize, hash: u64) -> bool {
.          let probe_seq_pos = self.probe_seq(hash).pos;
.          let probe_index =
.              |pos: usize| (pos.wrapping_sub(probe_seq_pos) & self.bucket_mask) / Group::WIDTH;
.          probe_index(i) == probe_index(new_i)
-- line 1281 ----------------------------------------
-- line 1312 ----------------------------------------
.          // replicate the buckets at the end of the trailing group. For example
.          // with 2 buckets and a group size of 4, the control bytes will look
.          // like this:
.          //
.          //     Real    |             Replicated
.          // ---------------------------------------------
.          // | [A] | [B] | [EMPTY] | [EMPTY] | [A] | [B] |
.          // ---------------------------------------------
2,290,009 ( 0.03%)          let index2 = ((index.wrapping_sub(Group::WIDTH)) & self.bucket_mask) + Group::WIDTH;
.
762,781 ( 0.01%)          *self.ctrl(index) = ctrl;
762,933 ( 0.01%)          *self.ctrl(index2) = ctrl;
.      }
.
.      /// Returns a pointer to a control byte.
.      #[inline]
.      unsafe fn ctrl(&self, index: usize) -> *mut u8 {
.          debug_assert!(index < self.num_ctrl_bytes());
.          self.ctrl.as_ptr().add(index)
.      }
.
.      #[inline]
.      fn buckets(&self) -> usize {
183,870 ( 0.00%)          self.bucket_mask + 1
.      }
.
.      #[inline]
.      fn num_ctrl_bytes(&self) -> usize {
200,760 ( 0.00%)          self.bucket_mask + 1 + Group::WIDTH
.      }
.
.      #[inline]
.      fn is_empty_singleton(&self) -> bool {
730,240 ( 0.01%)          self.bucket_mask == 0
.      }
.
.      #[allow(clippy::mut_mut)]
.      #[inline]
.      unsafe fn prepare_resize(
.          &self,
.          table_layout: TableLayout,
.          capacity: usize,
.          fallibility: Fallibility,
.      ) -> Result, TryReserveError> {
.          debug_assert!(self.items <= capacity);
.
.          // Allocate and initialize the new table.
4,302 ( 0.00%)          let mut new_table = RawTableInner::fallible_with_capacity(
.              self.alloc.clone(),
.              table_layout,
.              capacity,
.              fallibility,
.          )?;
89,398 ( 0.00%)          new_table.growth_left -= self.items;
.          new_table.items = self.items;
.
.          // The hash function may panic, in which case we simply free the new
.          // table without dropping any elements that may have been copied into
.          // it.
.          //
.          // This guard is also used to free the old table on success, see
.          // the comment at the bottom of this function.
.          Ok(guard(new_table, move |self_| {
39,461 ( 0.00%)              if !self_.is_empty_singleton() {
.                  self_.free_buckets(table_layout);
.              }
.          }))
.      }
.
.      /// Reserves or rehashes to make room for `additional` more elements.
.      ///
.      /// This uses dynamic dispatch to reduce the amount of
-- line 1383 ----------------------------------------
-- line 1388 ----------------------------------------
.          &mut self,
.          additional: usize,
.          hasher: &dyn Fn(&mut Self, usize) -> u64,
.          fallibility: Fallibility,
.          layout: TableLayout,
.          drop: Option,
.      ) -> Result<(), TryReserveError> {
.          // Avoid `Option::ok_or_else` because it bloats LLVM IR.
78,927 ( 0.00%)          let new_items = match self.items.checked_add(additional) {
.              Some(new_items) => new_items,
.              None => return Err(fallibility.capacity_overflow()),
.          };
78,922 ( 0.00%)          let full_capacity = bucket_mask_to_capacity(self.bucket_mask);
167,920 ( 0.00%)          if new_items <= full_capacity / 2 {
.              // Rehash in-place without re-allocating if we have plenty of spare
.              // capacity that is locked up due to DELETED entries.
.              self.rehash_in_place(hasher, layout.size, drop);
.              Ok(())
.          } else {
.              // Otherwise, conservatively resize to at least the next size up
.              // to avoid churning deletes into frequent rehashes.
.              self.resize_inner(
39,461 ( 0.00%)                  usize::max(new_items, full_capacity + 1),
.                  hasher,
.                  fallibility,
.                  layout,
.              )
.          }
.      }
.
.      /// Allocates a new table of a different size and moves the contents of the
-- line 1418 ----------------------------------------
-- line 1424 ----------------------------------------
.      #[inline(always)]
.      unsafe fn resize_inner(
.          &mut self,
.          capacity: usize,
.          hasher: &dyn Fn(&mut Self, usize) -> u64,
.          fallibility: Fallibility,
.          layout: TableLayout,
.      ) -> Result<(), TryReserveError> {
10,617 ( 0.00%)          let mut new_table = self.prepare_resize(layout, capacity, fallibility)?;
.
.          // Copy all elements to the new table.
.          for i in 0..self.buckets() {
453,501 ( 0.01%)              if !is_full(*self.ctrl(i)) {
.                  continue;
.              }
.
.              // This may panic.
.              let hash = hasher(self, i);
.
.              // We can use a simpler version of insert() here since:
.              // - there are no DELETED entries.
-- line 1444 ----------------------------------------
-- line 1454 ----------------------------------------
.          }
.
.          // We successfully copied all elements without panicking. Now replace
.          // self with the new table. The old table will have its memory freed but
.          // the items will not be dropped (since they have been moved into the
.          // new table).
.          mem::swap(self, &mut new_table);
.
39,461 ( 0.00%)          Ok(())
.      }
.
.      /// Rehashes the contents of the table in place (i.e. without changing the
.      /// allocation).
.      ///
.      /// If `hasher` panics then some the table's contents may be lost.
.      ///
.      /// This uses dynamic dispatch to reduce the amount of
-- line 1470 ----------------------------------------
-- line 1554 ----------------------------------------
.      #[inline]
.      unsafe fn free_buckets(&mut self, table_layout: TableLayout) {
.          // Avoid `Option::unwrap_or_else` because it bloats LLVM IR.
.          let (layout, ctrl_offset) = match table_layout.calculate_layout_for(self.buckets()) {
.              Some(lco) => lco,
.              None => hint::unreachable_unchecked(),
.          };
.          self.alloc.deallocate(
26,386 ( 0.00%)              NonNull::new_unchecked(self.ctrl.as_ptr().sub(ctrl_offset)),
.              layout,
.          );
.      }
.
.      /// Marks all table buckets as empty without dropping their contents.
.      #[inline]
.      fn clear_no_drop(&mut self) {
12,731 ( 0.00%)          if !self.is_empty_singleton() {
.              unsafe {
.                  self.ctrl(0).write_bytes(EMPTY, self.num_ctrl_bytes());
.              }
.          }
14,177 ( 0.00%)          self.items = 0;
12,743 ( 0.00%)          self.growth_left = bucket_mask_to_capacity(self.bucket_mask);
.      }
.
.      #[inline]
.      unsafe fn erase(&mut self, index: usize) {
.          debug_assert!(is_full(*self.ctrl(index)));
75,732 ( 0.00%)          let index_before = index.wrapping_sub(Group::WIDTH) & self.bucket_mask;
.          let empty_before = Group::load(self.ctrl(index_before)).match_empty();
.          let empty_after = Group::load(self.ctrl(index)).match_empty();
.
.          // If we are inside a continuous block of Group::WIDTH full or deleted
.          // cells then a probe window may have seen a full block when trying to
.          // insert. We therefore need to keep that block non-empty so that
.          // lookups will continue searching to the next probe window.
.          //
.          // Note that in this context `leading_zeros` refers to the bytes at the
.          // end of a group, while `trailing_zeros` refers to the bytes at the
.          // beginning of a group.
302,928 ( 0.00%)          let ctrl = if empty_before.leading_zeros() + empty_after.trailing_zeros() >= Group::WIDTH {
.              DELETED
.          } else {
370,635 ( 0.00%)              self.growth_left += 1;
.              EMPTY
.          };
.          self.set_ctrl(index, ctrl);
302,928 ( 0.00%)          self.items -= 1;
.      }
.  }
.
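--------------------------------------------------------------------------------
-- erase() above decides between re-marking a slot EMPTY and leaving a DELETED
-- tombstone. The rule, reduced to its arithmetic (a sketch; in the real code
-- the two run lengths come from leading_zeros/trailing_zeros on the empty-slot
-- bitmasks of the two neighbouring group loads):
--------------------------------------------------------------------------------
const EMPTY: u8 = 0b1111_1111;
const DELETED: u8 = 0b1000_0000;

/// If the erased slot sits inside a run of at least GROUP_WIDTH consecutive
/// non-empty slots, an earlier insertion may have probed straight past this
/// group, so the slot must stay non-empty (DELETED) to keep those probe
/// chains alive; otherwise it can become EMPTY and refund growth_left.
fn ctrl_after_erase(full_run_before: usize, full_run_after: usize) -> u8 {
    const GROUP_WIDTH: usize = 16; // assumed group width for this sketch
    if full_run_before + full_run_after >= GROUP_WIDTH {
        DELETED
    } else {
        EMPTY
    }
}
--------------------------------------------------------------------------------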
.  impl Clone for RawTable {
4,176 ( 0.00%)      fn clone(&self) -> Self {
549 ( 0.00%)          if self.table.is_empty_singleton() {
.              Self::new_in(self.table.alloc.clone())
.          } else {
.              unsafe {
.                  let mut new_table = ManuallyDrop::new(
.                      // Avoid `Result::ok_or_else` because it bloats LLVM IR.
.                      match Self::new_uninitialized(
.                          self.table.alloc.clone(),
.                          self.table.buckets(),
-- line 1615 ----------------------------------------
-- line 1624 ----------------------------------------
.                      // We need to free the memory allocated for the new table.
.                      new_table.free_buckets();
.                  });
.
.                  // Return the newly created table.
.                  ManuallyDrop::into_inner(new_table)
.              }
.          }
4,698 ( 0.00%)      }
.
.      fn clone_from(&mut self, source: &Self) {
.          if source.table.is_empty_singleton() {
.              *self = Self::new_in(self.table.alloc.clone());
.          } else {
.              unsafe {
.                  // First, drop all our elements without clearing the control bytes.
.                  self.drop_elements();
-- line 1640 ----------------------------------------
-- line 1687 ----------------------------------------
.                  .table
.                  .ctrl(0)
.                  .copy_to_nonoverlapping(self.table.ctrl(0), self.table.num_ctrl_bytes());
.              source
.                  .data_start()
.                  .copy_to_nonoverlapping(self.data_start(), self.table.buckets());
.
.              self.table.items = source.table.items;
24 ( 0.00%)              self.table.growth_left = source.table.growth_left;
.          }
.      }
.
.  impl RawTable {
.      /// Common code for clone and clone_from. Assumes `self.buckets() == source.buckets()`.
.      #[cfg_attr(feature = "inline-more", inline)]
.      unsafe fn clone_from_impl(&mut self, source: &Self, mut on_panic: impl FnMut(&mut Self)) {
.          // Copy the control bytes unchanged. We do this in a single pass
-- line 1703 ----------------------------------------
-- line 1790 ----------------------------------------
.      fn default() -> Self {
.          Self::new_in(Default::default())
.      }
.  }
.
.  #[cfg(feature = "nightly")]
.  unsafe impl<#[may_dangle] T, A: Allocator + Clone> Drop for RawTable {
.      #[cfg_attr(feature = "inline-more", inline)]
312,173 ( 0.00%)      fn drop(&mut self) {
305,861 ( 0.00%)          if !self.table.is_empty_singleton() {
.              unsafe {
.                  self.drop_elements();
.                  self.free_buckets();
.              }
.          }
334,834 ( 0.00%)      }
.  }
.  #[cfg(not(feature = "nightly"))]
.  impl Drop for RawTable {
.      #[cfg_attr(feature = "inline-more", inline)]
.      fn drop(&mut self) {
.          if !self.table.is_empty_singleton() {
.              unsafe {
.                  self.drop_elements();
-- line 1813 ----------------------------------------
-- line 1817 ----------------------------------------
.          }
.      }
.
.  impl IntoIterator for RawTable {
.      type Item = T;
.      type IntoIter = RawIntoIter;
.
.      #[cfg_attr(feature = "inline-more", inline)]
2,344 ( 0.00%)      fn into_iter(self) -> RawIntoIter {
.          unsafe {
.              let iter = self.iter();
.              self.into_iter_from(iter)
.          }
2,930 ( 0.00%)      }
.  }
.
.  /// Iterator over a sub-range of a table. Unlike `RawIter` this iterator does
.  /// not track an item count.
.  pub(crate) struct RawIterRange {
.      // Mask of full buckets in the current group. Bits are cleared from this
.      // mask as each element is processed.
.      current_group: BitMask,
-- line 1838 ----------------------------------------
-- line 1934 ----------------------------------------
.
.  impl Iterator for RawIterRange {
.      type Item = Bucket;
.
.      #[cfg_attr(feature = "inline-more", inline)]
.      fn next(&mut self) -> Option> {
.          unsafe {
.              loop {
146,807 ( 0.00%)                  if let Some(index) = self.current_group.lowest_set_bit() {
23,734 ( 0.00%)                      self.current_group = self.current_group.remove_lowest_bit();
38,651 ( 0.00%)                      return Some(self.data.next_n(index));
.                  }
.
117,212 ( 0.00%)                  if self.next_ctrl >= self.end {
.                      return None;
.                  }
.
.                  // We might read past self.end up to the next group boundary,
.                  // but this is fine because it only occurs on tables smaller
.                  // than the group size where the trailing control bytes are all
.                  // EMPTY. On larger tables self.end is guaranteed to be aligned
.                  // to the group size (since tables are power-of-two sized).
3,563 ( 0.00%)                  self.current_group = Group::load_aligned(self.next_ctrl).match_full();
3,528 ( 0.00%)                  self.data = self.data.next_n(Group::WIDTH);
6,405 ( 0.00%)                  self.next_ctrl = self.next_ctrl.add(Group::WIDTH);
.              }
.          }
.      }
.
.      #[inline]
.      fn size_hint(&self) -> (usize, Option) {
.          // We don't have an item count, so just guess based on the range size.
.          (
-- line 1966 ----------------------------------------
-- line 2102 ----------------------------------------
.              }
.          } else {
.              // We must have already iterated past the removed item.
.          }
.      }
.  }
.
.  unsafe fn drop_elements(&mut self) {
1,306 ( 0.00%)      if mem::needs_drop::() && self.len() != 0 {
.          for item in self {
.              item.drop();
.          }
.      }
.  }
.  }
.
.  impl Clone for RawIter {
-- line 2118 ----------------------------------------
-- line 2124 ----------------------------------------
.          }
.      }
.  }
.
.  impl Iterator for RawIter {
.      type Item = Bucket;
.
.      #[cfg_attr(feature = "inline-more", inline)]
24,282 ( 0.00%)      fn next(&mut self) -> Option> {
62,734 ( 0.00%)          if let Some(b) = self.iter.next() {
236,329 ( 0.00%)              self.items -= 1;
.              Some(b)
.          } else {
.              // We don't check against items == 0 here to allow the
.              // compiler to optimize away the item count entirely if the
.              // iterator length is never queried.
.              debug_assert_eq!(self.items, 0);
.              None
.          }
48,564 ( 0.00%)      }
.
.      #[inline]
.      fn size_hint(&self) -> (usize, Option) {
.          (self.items, Some(self.items))
.      }
.  }
.
.  impl ExactSizeIterator for RawIter {}
-- line 2151 ----------------------------------------
-- line 2177 ----------------------------------------
.      T: Sync,
.      A: Sync,
.  {
.  }
.
.  #[cfg(feature = "nightly")]
.  unsafe impl<#[may_dangle] T, A: Allocator + Clone> Drop for RawIntoIter {
.      #[cfg_attr(feature = "inline-more", inline)]
1,032 ( 0.00%)      fn drop(&mut self) {
.          unsafe {
.              // Drop all remaining elements
.              self.iter.drop_elements();
.
.              // Free the table
5,300 ( 0.00%)              if let Some((ptr, layout)) = self.allocation {
.                  self.alloc.deallocate(ptr, layout);
.              }
.          }
150 ( 0.00%)      }
.  }
.  #[cfg(not(feature = "nightly"))]
.  impl Drop for RawIntoIter {
.      #[cfg_attr(feature = "inline-more", inline)]
.      fn drop(&mut self) {
.          unsafe {
.              // Drop all remaining elements
.              self.iter.drop_elements();
-- line 2203 ----------------------------------------
-- line 2209 ----------------------------------------
.          }
.      }
.  }
.
.  impl Iterator for RawIntoIter {
.      type Item = T;
.
.      #[cfg_attr(feature = "inline-more", inline)]
1,298 ( 0.00%)      fn next(&mut self) -> Option {
162 ( 0.00%)          unsafe { Some(self.iter.next()?.read()) }
3,714 ( 0.00%)      }
.
.      #[inline]
.      fn size_hint(&self) -> (usize, Option) {
4 ( 0.00%)          self.iter.size_hint()
.      }
.  }
.
.  impl ExactSizeIterator for RawIntoIter {}
.  impl FusedIterator for RawIntoIter {}
.
.  /// Iterator which consumes elements without freeing the table storage.
.  pub struct RawDrain<'a, T, A: Allocator + Clone = Global> {
-- line 2231 ----------------------------------------
-- line 2259 ----------------------------------------
.      where
.      T: Sync,
.      A: Sync,
.  {
.  }
.
.  impl Drop for RawDrain<'_, T, A> {
.      #[cfg_attr(feature = "inline-more", inline)]
2,208 ( 0.00%)      fn drop(&mut self) {
.          unsafe {
.              // Drop all remaining elements. Note that this may panic.
.              self.iter.drop_elements();
.
.              // Reset the contents of the table now that all elements have been
.              // dropped.
.              self.table.clear_no_drop();
.
.              // Move the now empty table back to its original location.
276 ( 0.00%)              self.orig_table
.                  .as_ptr()
.                  .copy_from_nonoverlapping(&*self.table, 1);
.          }
2,208 ( 0.00%)      }
.  }
.
.  impl Iterator for RawDrain<'_, T, A> {
.      type Item = T;
.
.      #[cfg_attr(feature = "inline-more", inline)]
.      fn next(&mut self) -> Option {
.          unsafe {
-- line 2289 ----------------------------------------
8,440,235 ( 0.11%)

--------------------------------------------------------------------------------
The following files chosen for auto-annotation could not be found:
--------------------------------------------------------------------------------
  ./elf/dl-lookup.c
  ./malloc/malloc.c
  ./string/../sysdeps/x86_64/multiarch/memcmp-avx2-movbe.S
  ./string/../sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S
  ./string/../sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S
  ./string/../sysdeps/x86_64/multiarch/strcmp-avx2.S
  ./string/../sysdeps/x86_64/multiarch/strlen-avx2.S
  /tmp/gcc-build/x86_64-unknown-linux-gnu/libstdc++-v3/libsupc++/../../../../gcc-5.5.0/libstdc++-v3/libsupc++/new_op.cc
--------------------------------------------------------------------------------
Ir
--------------------------------------------------------------------------------
86,166,629 ( 1.12%)  events annotated