--------------------------------------------------------------------------------
I1 cache:         65536 B, 64 B, 4-way associative
D1 cache:         32768 B, 64 B, 8-way associative
LL cache:         67108864 B, 64 B, 64-way associative
Command:          /usr/home/liquid/.rustup/toolchains/w-profiling/bin/rustc --crate-name core_foundation src/lib.rs --error-format=json --json=diagnostic-rendered-ansi,artifacts,future-incompat --crate-type lib --emit=dep-info,metadata,link -C opt-level=3 -C embed-bitcode=no -C metadata=0ca5cbbfbd5c42e1 -C extra-filename=-0ca5cbbfbd5c42e1 --out-dir /usr/home/liquid/tmp/.tmpbqIsKo/target/release/deps -L dependency=/usr/home/liquid/tmp/.tmpbqIsKo/target/release/deps --extern core_foundation_sys=/usr/home/liquid/tmp/.tmpbqIsKo/target/release/deps/libcore_foundation_sys-5d0941aa502f22de.rmeta --extern libc=/usr/home/liquid/tmp/.tmpbqIsKo/target/release/deps/liblibc-adbe6767c2fae048.rmeta -Adeprecated -Aunknown-lints -Zincremental-verify-ich
Data file:        results/cgout-w-profiling-core-foundation-0.9.2-Opt-Full
Events recorded:  Ir
Events shown:     Ir
Event sort order: Ir
Thresholds:       0.1
Include dirs:
User annotated:
Auto-annotation:  on
--------------------------------------------------------------------------------
Ir
--------------------------------------------------------------------------------
3,245,725,719 (100.0%)  PROGRAM TOTALS
--------------------------------------------------------------------------------
Ir  file:function
--------------------------------------------------------------------------------
124,226,096 ( 3.83%)  ./malloc/malloc.c:_int_free
108,201,078 ( 3.33%)  ./malloc/malloc.c:_int_malloc
104,543,942 ( 3.22%)  ???:llvm::FPPassManager::runOnFunction(llvm::Function&)
76,077,925 ( 2.34%)  ./malloc/malloc.c:malloc
51,960,755 ( 1.60%)  ???:llvm::AnalysisManager::getResultImpl(llvm::AnalysisKey*, llvm::Function&)
48,273,691 ( 1.49%)  ???:llvm::AnalysisManager::invalidate(llvm::Function&, llvm::PreservedAnalyses const&)
40,504,925 ( 1.25%)  ./malloc/malloc.c:free
35,921,947 ( 1.11%)  ???:llvm::InstCombinerImpl::run()
32,988,375 ( 1.02%)  ???:llvm::BitstreamCursor::readRecord(unsigned int, llvm::SmallVectorImpl&, llvm::StringRef*)
27,790,043 ( 0.86%)  ???:llvm::SelectionDAG::Combine(llvm::CombineLevel, llvm::AAResults*, llvm::CodeGenOpt::Level)
26,677,491 ( 0.82%)  ???:combineInstructionsOverFunction(llvm::Function&, llvm::InstCombineWorklist&, llvm::AAResults*, llvm::AssumptionCache&, llvm::TargetLibraryInfo&, llvm::TargetTransformInfo&, llvm::DominatorTree&, llvm::OptimizationRemarkEmitter&, llvm::BlockFrequencyInfo*, llvm::ProfileSummaryInfo*, unsigned int, llvm::LoopInfo*)
25,040,294 ( 0.77%)  ./string/../sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S:__memcpy_avx_unaligned_erms
22,232,131 ( 0.68%)  ???:llvm::AttributeList::addAttributes(llvm::LLVMContext&, unsigned int, llvm::AttrBuilder const&) const
21,422,697 ( 0.66%)  ./string/../sysdeps/x86_64/multiarch/memcmp-avx2-movbe.S:__memcmp_avx2_movbe
20,027,597 ( 0.62%)  ???:llvm::AttributeList::get(llvm::LLVMContext&, llvm::ArrayRef)
19,623,163 ( 0.60%)  ???:llvm::DomTreeBuilder::SemiNCAInfo >::CalculateFromScratch(llvm::DominatorTreeBase&, llvm::DomTreeBuilder::SemiNCAInfo >::BatchUpdateInfo*)
18,507,832 ( 0.57%)  ???:llvm::TargetLibraryInfoImpl::getLibFunc(llvm::Function const&, llvm::LibFunc&) const
18,306,325 ( 0.56%)  ???:runCVP(llvm::Module&) [clone .llvm.11785992503873176614]
16,087,214 ( 0.50%)  ./malloc/malloc.c:malloc_consolidate
15,977,728 ( 0.49%)  ???:llvm::coro::declaresIntrinsics(llvm::Module const&, std::initializer_list)
15,084,326 ( 0.46%)  ???:llvm::LiveVariables::runOnBlock(llvm::MachineBasicBlock*, unsigned int)
15,047,630 ( 0.46%)  ???:llvm::PMDataManager::verifyPreservedAnalysis(llvm::Pass*)
14,762,908 ( 0.45%)  ./string/../sysdeps/x86_64/multiarch/strcmp-avx2.S:__strncmp_avx2
14,050,448 ( 0.43%)  ./string/../sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S:__memset_avx2_erms
12,965,506 ( 0.40%)  ???:llvm::detail::PassModel>, llvm::PreservedAnalyses, llvm::AnalysisManager>::run(llvm::Function&, llvm::AnalysisManager&)
12,590,480 ( 0.39%)  ???:llvm::ValueHandleBase::AddToUseList()
12,221,682 ( 0.38%)  ???:bool llvm::DenseMapBase*, llvm::DenseMapInfo<(anonymous namespace)::SimpleValue>, llvm::detail::DenseMapPair<(anonymous namespace)::SimpleValue, llvm::ScopedHashTableVal<(anonymous namespace)::SimpleValue, llvm::Value*>*> >, (anonymous namespace)::SimpleValue, llvm::ScopedHashTableVal<(anonymous namespace)::SimpleValue, llvm::Value*>*, llvm::DenseMapInfo<(anonymous namespace)::SimpleValue>, llvm::detail::DenseMapPair<(anonymous namespace)::SimpleValue, llvm::ScopedHashTableVal<(anonymous namespace)::SimpleValue, llvm::Value*>*> >::LookupBucketFor<(anonymous namespace)::SimpleValue>((anonymous namespace)::SimpleValue const&, llvm::detail::DenseMapPair<(anonymous namespace)::SimpleValue, llvm::ScopedHashTableVal<(anonymous namespace)::SimpleValue, llvm::Value*>*> const*&) const
12,058,672 ( 0.37%)  ???:llvm_regexec
11,826,576 ( 0.36%)  ./malloc/malloc.c:unlink_chunk.constprop.0
11,393,690 ( 0.35%)  /tmp/gcc-build/x86_64-unknown-linux-gnu/libstdc++-v3/libsupc++/../../../../gcc-5.5.0/libstdc++-v3/libsupc++/new_op.cc:operator new(unsigned long)
11,275,727 ( 0.35%)  ???:llvm::removeUnreachableBlocks(llvm::Function&, llvm::DomTreeUpdater*, llvm::MemorySSAUpdater*)
10,626,693 ( 0.33%)  ???:llvm::InlineFunction(llvm::CallBase&, llvm::InlineFunctionInfo&, llvm::AAResults*, bool, llvm::Function*)
10,308,956 ( 0.32%)  ./elf/dl-lookup.c:_dl_lookup_symbol_x
10,283,344 ( 0.32%)  ???:llvm::PassRegistry::enumerateWith(llvm::PassRegistrationListener*)
10,054,826 ( 0.31%)  ???:llvm::AttributeSetNode::get(llvm::LLVMContext&, llvm::AttrBuilder const&)
9,940,543 ( 0.31%)  ???:llvm::InstCombinerImpl::visitCallInst(llvm::CallInst&)
9,720,179 ( 0.30%)  ???:computeKnownBits(llvm::Value const*, llvm::KnownBits&, unsigned int, (anonymous namespace)::Query const&) [clone .llvm.15619146473165121143]
9,460,785 ( 0.29%)  ???:SimplifyICmpInst(unsigned int, llvm::Value*, llvm::Value*, llvm::SimplifyQuery const&, unsigned int) [clone .llvm.1619516508949622737]
9,357,034 ( 0.29%)  ???:llvm::MD5::final(llvm::MD5::MD5Result&)
9,222,479 ( 0.28%)  ???:isKnownNonZero(llvm::Value const*, llvm::APInt const&, unsigned int, (anonymous namespace)::Query const&) [clone .llvm.15619146473165121143]
8,847,551 ( 0.27%)  ???:(anonymous namespace)::MachineCopyPropagation::runOnMachineFunction(llvm::MachineFunction&)
8,741,532 ( 0.27%)  ???:llvm::DataLayout::getAlignment(llvm::Type*, bool) const
8,275,074 ( 0.25%)  ./string/../sysdeps/x86_64/multiarch/strlen-avx2.S:__strlen_avx2
8,185,596 ( 0.25%)  ???:(anonymous namespace)::LazyValueInfoImpl::solve() [clone .llvm.4316243980339171764]
8,101,219 ( 0.25%)  ???:llvm::DomTreeBuilder::SemiNCAInfo >::CalculateFromScratch(llvm::DominatorTreeBase&, llvm::DomTreeBuilder::SemiNCAInfo >::BatchUpdateInfo*)
8,001,644 ( 0.25%)  ???:llvm::SimplifyInstruction(llvm::Instruction*, llvm::SimplifyQuery const&, llvm::OptimizationRemarkEmitter*)
7,877,513 ( 0.24%)  ???:(anonymous namespace)::BitcodeReader::parseModule(unsigned long, bool, llvm::function_ref, std::allocator > > (llvm::StringRef)>)
7,829,283 ( 0.24%)  ???:(anonymous namespace)::EarlyCSE::run() [clone .llvm.7062997131228810369]
7,756,932 ( 0.24%)  ???:llvm::PMDataManager::removeNotPreservedAnalysis(llvm::Pass*)
7,686,751 ( 0.24%)  ???:llvm::AttributeList::addAttribute(llvm::LLVMContext&, unsigned int, llvm::Attribute::AttrKind) const
7,485,257 ( 0.23%)  ./string/../sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S:__memset_avx2_unaligned_erms
7,454,366 ( 0.23%)  ???:llvm::MD5::update(llvm::StringRef)
7,137,696 ( 0.22%)  ???:llvm::X86TargetLowering::X86TargetLowering(llvm::X86TargetMachine const&, llvm::X86Subtarget const&)
6,996,430 ( 0.22%)  ???:(anonymous namespace)::DeadMachineInstructionElim::eliminateDeadMI(llvm::MachineFunction&)
6,831,168 ( 0.21%)  ???:llvm::FindFunctionBackedges(llvm::Function const&, llvm::SmallVectorImpl >&)
6,765,959 ( 0.21%)  ???:llvm::SelectionDAGISel::SelectCodeCommon(llvm::SDNode*, unsigned char const*, unsigned int)
6,764,039 ( 0.21%)  ???:llvm::DomTreeBuilder::SemiNCAInfo >::FindRoots(llvm::DominatorTreeBase const&, llvm::DomTreeBuilder::SemiNCAInfo >::BatchUpdateInfo*)
6,736,445 ( 0.21%)  ???:llvm::simplifyCFG(llvm::BasicBlock*, llvm::TargetTransformInfo const&, llvm::DomTreeUpdater*, llvm::SimplifyCFGOptions const&, llvm::ArrayRef)
6,585,989 ( 0.20%)  ???:llvm::LiveVariables::HandleRegMask(llvm::MachineOperand const&)
6,298,803 ( 0.19%)  /usr/home/liquid/rust/worktree-benchmarking/library/core/src/num/uint_macros.rs:::short_write_process_buffer::
6,252,655 ( 0.19%)  ???:llvm::InstCombinerImpl::visitICmpInst(llvm::ICmpInst&)
6,189,502 ( 0.19%)  ???:???
6,105,920 ( 0.19%)  ???:llvm::TargetLoweringBase::computeRegisterProperties(llvm::TargetRegisterInfo const*)
6,098,528 ( 0.19%)  ???:llvm::DataLayout::getTypeSizeInBits(llvm::Type*) const
6,033,048 ( 0.19%)  ???:llvm::MemorySSA::buildMemorySSA(llvm::BatchAAResults&)
5,957,700 ( 0.18%)  ???:llvm::FoldingSetBase::FindNodeOrInsertPos(llvm::FoldingSetNodeID const&, void*&, llvm::FoldingSetBase::FoldingSetInfo const&)
5,782,552 ( 0.18%)  ./malloc/malloc.c:realloc
5,562,007 ( 0.17%)  ./malloc/malloc.c:calloc
5,354,183 ( 0.16%)  ???:llvm::DomTreeBuilder::SemiNCAInfo >::runSemiNCA(llvm::DominatorTreeBase&, unsigned int)
5,320,029 ( 0.16%)  /usr/home/liquid/.cargo/registry/src/github.com-1ecc6299db9ec823/hashbrown-0.12.0/src/raw/mod.rs:, (), core::hash::BuildHasherDefault>>::from_hash::>::{closure#0}>
5,279,003 ( 0.16%)  ???:(anonymous namespace)::AggressiveDeadCodeElimination::performDeadCodeElimination()
5,251,581 ( 0.16%)  ???:updateCGAndAnalysisManagerForPass(llvm::LazyCallGraph&, llvm::LazyCallGraph::SCC&, llvm::LazyCallGraph::Node&, llvm::AnalysisManager&, llvm::CGSCCUpdateResult&, llvm::AnalysisManager&, bool) [clone .llvm.5426518467876156712]
5,197,482 ( 0.16%)  ???:llvm::BasicAAResult::alias(llvm::MemoryLocation const&, llvm::MemoryLocation const&, llvm::AAQueryInfo&)
5,147,701 ( 0.16%)  ???:computeKnownBitsFromAssume(llvm::Value const*, llvm::KnownBits&, unsigned int, (anonymous namespace)::Query const&)
5,142,203 ( 0.16%)  ???:llvm::ScheduleDAGSDNodes::BuildSchedUnits()
5,124,146 ( 0.16%)  ???:(anonymous namespace)::LazyValueInfoImpl::getValueInBlock(llvm::Value*, llvm::BasicBlock*, llvm::Instruction*) [clone .llvm.4316243980339171764]
5,108,985 ( 0.16%)  ???:llvm::SROA::runOnAlloca(llvm::AllocaInst&)
5,045,546 ( 0.16%)  ???:llvm::LivePhysRegs::stepBackward(llvm::MachineInstr const&)
5,012,641 ( 0.15%)  ???:(anonymous namespace)::eliminateDeadStores(llvm::Function&, llvm::AAResults&, llvm::MemorySSA&, llvm::DominatorTree&, llvm::PostDominatorTree&, llvm::TargetLibraryInfo const&, llvm::LoopInfo const&) [clone .llvm.5769264623867638418]
4,984,619 ( 0.15%)  ???:(anonymous namespace)::CVPLatticeFunc::ComputeInstructionState(llvm::Instruction&, llvm::DenseMap, llvm::PointerIntPairInfo > >, (anonymous namespace)::CVPLatticeVal, llvm::DenseMapInfo, llvm::PointerIntPairInfo > > >, llvm::detail::DenseMapPair, llvm::PointerIntPairInfo > >, (anonymous namespace)::CVPLatticeVal> >&, llvm::SparseSolver, llvm::PointerIntPairInfo > >, (anonymous namespace)::CVPLatticeVal, llvm::LatticeKeyInfo, llvm::PointerIntPairInfo > > > >&)
4,874,958 ( 0.15%)  ???:computeKnownBitsFromOperator(llvm::Operator const*, llvm::APInt const&, llvm::KnownBits&, unsigned int, (anonymous namespace)::Query const&)
4,855,498 ( 0.15%)  ???:llvm::BlockFrequencyInfoImpl::initializeRPOT()
4,845,137 ( 0.15%)  ./elf/dl-lookup.c:do_lookup_x
4,831,088 ( 0.15%)  ???:llvm::X86TargetMachine::getTargetTransformInfo(llvm::Function const&)
4,828,380 ( 0.15%)  ???:llvm::TargetLoweringBase::getTypeConversion(llvm::LLVMContext&, llvm::EVT) const
4,666,570 ( 0.14%)  ???:computeKnownBits(llvm::Value const*, llvm::APInt const&, llvm::KnownBits&, unsigned int, (anonymous namespace)::Query const&)
4,664,078 ( 0.14%)  ???:(anonymous namespace)::PruningFunctionCloner::CloneBlock(llvm::BasicBlock const*, llvm::ilist_iterator, false, true>, std::vector >&)
4,421,872 ( 0.14%)  ???:llvm::PassManager, llvm::LazyCallGraph&, llvm::CGSCCUpdateResult&>::run(llvm::LazyCallGraph::SCC&, llvm::AnalysisManager&, llvm::LazyCallGraph&, llvm::CGSCCUpdateResult&)
4,299,291 ( 0.13%)  ???:llvm::BlockFrequencyInfoImplBase::finalizeMetrics()
4,284,558 ( 0.13%)  ???:llvm::detail::AnalysisResultModel, llvm::Function>, llvm::OuterAnalysisManagerProxy, llvm::Function>::Result, llvm::PreservedAnalyses, llvm::AnalysisManager::Invalidator, true>::invalidate(llvm::Function&, llvm::PreservedAnalyses const&, llvm::AnalysisManager::Invalidator&)
4,225,284 ( 0.13%)  /usr/home/liquid/rust/worktree-benchmarking/compiler/rustc_data_structures/src/sip128.rs:::short_write_process_buffer::
4,213,853 ( 0.13%)  ???:llvm::PMTopLevelManager::setLastUser(llvm::ArrayRef, llvm::Pass*)
4,197,124 ( 0.13%)  ./stdlib/msort.c:msort_with_tmp.part.0
4,143,887 ( 0.13%)  ???:llvm::DenseMapBase, std::unique_ptr<(anonymous namespace)::LazyValueInfoCache::BlockCacheEntry, std::default_delete<(anonymous namespace)::LazyValueInfoCache::BlockCacheEntry> >, llvm::DenseMapInfo >, llvm::detail::DenseMapPair, std::unique_ptr<(anonymous namespace)::LazyValueInfoCache::BlockCacheEntry, std::default_delete<(anonymous namespace)::LazyValueInfoCache::BlockCacheEntry> > > >, llvm::PoisoningVH, std::unique_ptr<(anonymous namespace)::LazyValueInfoCache::BlockCacheEntry, std::default_delete<(anonymous namespace)::LazyValueInfoCache::BlockCacheEntry> >, llvm::DenseMapInfo >, llvm::detail::DenseMapPair, std::unique_ptr<(anonymous namespace)::LazyValueInfoCache::BlockCacheEntry, std::default_delete<(anonymous namespace)::LazyValueInfoCache::BlockCacheEntry> > > >::destroyAll() [clone .llvm.4316243980339171764]
4,141,271 ( 0.13%)  ./elf/../sysdeps/x86_64/dl-machine.h:_dl_relocate_object
4,058,036 ( 0.13%)  ???:llvm::SHA1::hashBlock()
4,056,997 ( 0.12%)  ???:(anonymous namespace)::Verifier::visitInstruction(llvm::Instruction&)
4,056,377 ( 0.12%)  ???:SetImpliedBits(llvm::FeatureBitset&, llvm::FeatureBitset const&, llvm::ArrayRef)
4,048,876 ( 0.12%)  ???:simplifyFunctionCFGImpl(llvm::Function&, llvm::TargetTransformInfo const&, llvm::DominatorTree*, llvm::SimplifyCFGOptions const&) [clone .llvm.11597842506770977528]
4,032,246 ( 0.12%)  ???:bool llvm::detail::UniqueFunctionBase::CallImpl(void*, llvm::StringRef, llvm::Any&)
4,021,522 ( 0.12%)  ./malloc/malloc.c:_int_realloc
4,013,720 ( 0.12%)  ???:int llvm::array_pod_sort_comparator(void const*, void const*)
4,002,016 ( 0.12%)  ???:llvm::DemandedBits::isInstructionDead(llvm::Instruction*)
3,986,070 ( 0.12%)  ???:llvm::InstCombinerImpl::SimplifyDemandedUseBits(llvm::Value*, llvm::APInt, llvm::KnownBits&, unsigned int, llvm::Instruction*)
3,947,164 ( 0.12%)  ???:llvm::InstCombinerImpl::visitLoadInst(llvm::LoadInst&)
3,941,085 ( 0.12%)  ???:runImpl(llvm::Function&, llvm::LazyValueInfo*, llvm::DominatorTree*, llvm::SimplifyQuery const&) [clone .llvm.16011871802505272439]
3,925,463 ( 0.12%)  ./string/../sysdeps/x86_64/multiarch/memchr-avx2.S:__memchr_avx2
3,918,777 ( 0.12%)  ???:llvm::SimplifyGEPInst(llvm::Type*, llvm::ArrayRef, llvm::SimplifyQuery const&)
3,902,993 ( 0.12%)  ./string/../sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S:__memcpy_sse2_unaligned_erms
3,901,633 ( 0.12%)  ???:llvm::Value::stripAndAccumulateConstantOffsets(llvm::DataLayout const&, llvm::APInt&, bool, llvm::function_ref) const
3,894,988 ( 0.12%)  ???:llvm::MachineInstr::addOperand(llvm::MachineFunction&, llvm::MachineOperand const&)
3,871,036 ( 0.12%)  ???:llvm::X86TargetMachine::getSubtargetImpl(llvm::Function const&) const
3,865,996 ( 0.12%)  ???:llvm::SCCPInstVisitor::solve()
3,864,358 ( 0.12%)  ???:llvm::GlobalValue::isInterposable() const
3,860,088 ( 0.12%)  ???:llvm::FoldingSet::NodeEquals(llvm::FoldingSetBase const*, llvm::FoldingSetBase::Node*, llvm::FoldingSetNodeID const&, unsigned int, llvm::FoldingSetNodeID&)
3,853,485 ( 0.12%)  ???:llvm::detail::AnalysisResultModel::Invalidator, true>::~AnalysisResultModel()
3,776,497 ( 0.12%)  ???:(anonymous namespace)::DAGCombiner::combine(llvm::SDNode*)
3,774,313 ( 0.12%)  ???:char const** std::__find_if >(char const**, char const**, __gnu_cxx::__ops::_Iter_equals_val, std::random_access_iterator_tag)
3,766,996 ( 0.12%)  ???:llvm::GVN::processBlock(llvm::BasicBlock*)
3,765,729 ( 0.12%)  ???:llvm::Value::~Value()
3,673,776 ( 0.11%)  ???:llvm::PopulateLoopsDFS::traverse(llvm::BasicBlock*)
3,616,080 ( 0.11%)  ???:llvm::PMTopLevelManager::AUFoldingSetNode::Profile(llvm::FoldingSetNodeID&, llvm::AnalysisUsage const&)
3,583,557 ( 0.11%)  ???:llvm::LoopInfoBase::analyze(llvm::DominatorTreeBase const&)
3,498,673 ( 0.11%)  ???:llvm::InstCombinerImpl::visitStoreInst(llvm::StoreInst&)
3,498,321 ( 0.11%)  ???:(anonymous namespace)::SimplifyCFGOpt::simplifyCondBranch(llvm::BranchInst*, llvm::IRBuilder&)
3,492,307 ( 0.11%)  ???:llvm::ReassociatePass::BuildRankMap(llvm::Function&, llvm::ReversePostOrderTraversal >&)
3,484,629 ( 0.11%)  ???:std::back_insert_iterator > > std::__copy_move_a2, false, llvm::GraphTraits >, std::back_insert_iterator > > >(llvm::po_iterator, false, llvm::GraphTraits >, llvm::po_iterator, false, llvm::GraphTraits >, std::back_insert_iterator > >)
3,482,215 ( 0.11%)  /usr/home/liquid/rust/worktree-benchmarking/compiler/rustc_span/src/caching_source_map_view.rs:::span_data_to_lines_and_cols
3,474,419 ( 0.11%)  ???:llvm::Function::Function(llvm::FunctionType*, llvm::GlobalValue::LinkageTypes, unsigned int, llvm::Twine const&, llvm::Module*)
3,435,569 ( 0.11%)  ???:llvm::MemorySSA::OptimizeUses::optimizeUses()
3,433,926 ( 0.11%)  ???:llvm::isNonEscapingLocalObject(llvm::Value const*, llvm::SmallDenseMap, llvm::detail::DenseMapPair >*)
3,388,617 ( 0.10%)  /usr/home/liquid/rust/worktree-benchmarking/library/std/src/sys/unix/alloc.rs:__rdl_alloc
3,388,240 ( 0.10%)  ???:(anonymous namespace)::GetCFGOnlyPasses::passEnumerate(llvm::PassInfo const*) [clone .llvm.764396836974782617]
3,340,237 ( 0.10%)  ???:llvm::DAGTypeLegalizer::run()
3,337,411 ( 0.10%)  ???:llvm::InstCombinePass::run(llvm::Function&, llvm::AnalysisManager&)
3,320,706 ( 0.10%)  ???:(anonymous namespace)::LazyValueInfoImpl::getEdgeValue(llvm::Value*, llvm::BasicBlock*, llvm::BasicBlock*, llvm::Instruction*) [clone .llvm.4316243980339171764]
3,306,176 ( 0.10%)  ???:llvm::ConstantFoldTerminator(llvm::BasicBlock*, bool, llvm::TargetLibraryInfo const*, llvm::DomTreeUpdater*)
3,268,030 ( 0.10%)  ???:llvm::UpgradeFunctionAttributes(llvm::Function&)
3,253,133 ( 0.10%)  ???:llvm::InstCombinerImpl::visitBitCast(llvm::BitCastInst&)
--------------------------------------------------------------------------------
-- Auto-annotated source: /usr/home/liquid/rust/worktree-benchmarking/library/core/src/num/uint_macros.rs
--------------------------------------------------------------------------------
Ir

-- line 57 ----------------------------------------
. /// # Examples
. ///
. /// Basic usage:
. ///
. /// ```
. #[doc = concat!("assert_eq!(", stringify!($SelfT), "::from_str_radix(\"A\", 16), Ok(10));")]
. /// ```
. #[stable(feature = "rust1", since = "1.0.0")]
182 ( 0.00%)  pub fn from_str_radix(src: &str, radix: u32) -> Result<Self, ParseIntError> {
91 ( 0.00%)  from_str_radix(src, radix)
273 ( 0.00%)  }
.
. /// Returns the number of ones in the binary representation of `self`.
. ///
. /// # Examples
. ///
. /// Basic usage:
. ///
. /// ```
-- line 75 ----------------------------------------
-- line 80 ----------------------------------------
. #[stable(feature = "rust1", since = "1.0.0")]
. #[rustc_const_stable(feature = "const_math", since = "1.32.0")]
. #[doc(alias = "popcount")]
. #[doc(alias = "popcnt")]
. #[must_use = "this returns the result of the operation, \
. without modifying the original"]
. #[inline(always)]
. pub const fn count_ones(self) -> u32 {
218,490 ( 0.01%)  intrinsics::ctpop(self as $ActualT) as u32
. }
.
. /// Returns the number of zeros in the binary representation of `self`.
. ///
. /// # Examples
. ///
. /// Basic usage:
. ///
-- line 96 ----------------------------------------
-- line 118 ----------------------------------------
. /// assert_eq!(n.leading_zeros(), 2);
. /// ```
. #[stable(feature = "rust1", since = "1.0.0")]
. #[rustc_const_stable(feature = "const_math", since = "1.32.0")]
. #[must_use = "this returns the result of the operation, \
. without modifying the original"]
. #[inline(always)]
. pub const fn leading_zeros(self) -> u32 {
410,475 ( 0.01%)  intrinsics::ctlz(self as $ActualT) as u32
. }
.
. /// Returns the number of trailing zeros in the binary representation
. /// of `self`.
. ///
. /// # Examples
. ///
. /// Basic usage:
-- line 134 ----------------------------------------
-- line 139 ----------------------------------------
. /// assert_eq!(n.trailing_zeros(), 3);
. /// ```
. #[stable(feature = "rust1", since = "1.0.0")]
. #[rustc_const_stable(feature = "const_math", since = "1.32.0")]
. #[must_use = "this returns the result of the operation, \
. without modifying the original"]
. #[inline(always)]
. pub const fn trailing_zeros(self) -> u32 {
341,349 ( 0.01%)  intrinsics::cttz(self) as u32
. }
.
. /// Returns the number of leading ones in the binary representation of `self`.
. ///
. /// # Examples
. ///
. /// Basic usage:
.
. ///
-- line 155 ----------------------------------------
-- line 204 ----------------------------------------
. #[doc = concat!("assert_eq!(n.rotate_left(", $rot, "), m);")]
. /// ```
. #[stable(feature = "rust1", since = "1.0.0")]
. #[rustc_const_stable(feature = "const_math", since = "1.32.0")]
. #[must_use = "this returns the result of the operation, \
. without modifying the original"]
. #[inline(always)]
. pub const fn rotate_left(self, n: u32) -> Self {
7,535,625 ( 0.23%)  intrinsics::rotate_left(self, n as $SelfT)
. }
.
. /// Shifts the bits to the right by a specified amount, `n`,
. /// wrapping the truncated bits to the beginning of the resulting
. /// integer.
. ///
. /// Please note this isn't the same operation as the `>>` shifting operator!
. ///
-- line 220 ----------------------------------------
-- line 430 ----------------------------------------
. #[doc = concat!("assert_eq!((", stringify!($SelfT), "::MAX - 2).checked_add(3), None);")]
. /// ```
. #[stable(feature = "rust1", since = "1.0.0")]
. #[rustc_const_stable(feature = "const_checked_int_methods", since = "1.47.0")]
. #[must_use = "this returns the result of the operation, \
. without modifying the original"]
. #[inline]
. pub const fn checked_add(self, rhs: Self) -> Option<Self> {
14 ( 0.00%)  let (a, b) = self.overflowing_add(rhs);
. if unlikely!(b) {None} else {Some(a)}
. }
.
. /// Unchecked integer addition. Computes `self + rhs`, assuming overflow
. /// cannot occur.
. ///
. /// # Safety
. ///
-- line 446 ----------------------------------------
-- line 456 ----------------------------------------
. )]
. #[must_use = "this returns the result of the operation, \
. without modifying the original"]
. #[rustc_const_unstable(feature = "const_inherent_unchecked_arith", issue = "85122")]
. #[inline(always)]
. pub const unsafe fn unchecked_add(self, rhs: Self) -> Self {
. // SAFETY: the caller must uphold the safety contract for
. // `unchecked_add`.
666,845 ( 0.02%)  unsafe { intrinsics::unchecked_add(self, rhs) }
. }
.
. /// Checked addition with a signed integer. Computes `self + rhs`,
. /// returning `None` if overflow occurred.
. ///
. /// # Examples
. ///
. /// Basic usage:
-- line 472 ----------------------------------------
-- line 525 ----------------------------------------
. )]
. #[must_use = "this returns the result of the operation, \
. without modifying the original"]
. #[rustc_const_unstable(feature = "const_inherent_unchecked_arith", issue = "85122")]
. #[inline(always)]
. pub const unsafe fn unchecked_sub(self, rhs: Self) -> Self {
. // SAFETY: the caller must uphold the safety contract for
. // `unchecked_sub`.
37,034 ( 0.00%)  unsafe { intrinsics::unchecked_sub(self, rhs) }
. }
.
. /// Checked integer multiplication. Computes `self * rhs`, returning
. /// `None` if overflow occurred.
. ///
. /// # Examples
. ///
. /// Basic usage:
-- line 541 ----------------------------------------
-- line 596 ----------------------------------------
. without modifying the original"]
. #[inline]
. pub const fn checked_div(self, rhs: Self) -> Option<Self> {
. if unlikely!(rhs == 0) {
. None
. } else {
. // SAFETY: div by zero has been checked above and unsigned types have no other
. // failure modes for division
976 ( 0.00%)  Some(unsafe { intrinsics::unchecked_div(self, rhs) })
. }
. }
.
. /// Checked Euclidean division. Computes `self.div_euclid(rhs)`, returning `None`
. /// if `rhs == 0`.
. ///
. /// # Examples
. ///
-- line 612 ----------------------------------------
-- line 1035 ----------------------------------------
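The checked arithmetic bodies above all follow one shape: perform the operation with an overflow flag, then branch on the flag (`overflowing_add` plus `unlikely!`). A minimal standalone sketch of that shape; the helper name `checked_add_u32` is illustrative and not part of the profiled code:

// Sketch of the checked-add pattern from the annotated excerpt above:
// do the add with an overflow flag, then branch on the flag.
fn checked_add_u32(a: u32, b: u32) -> Option<u32> {
    let (sum, overflowed) = a.overflowing_add(b);
    if overflowed { None } else { Some(sum) }
}

fn main() {
    assert_eq!(checked_add_u32(5, 2), Some(7));
    assert_eq!(checked_add_u32(u32::MAX, 1), None);
}
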
. #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MAX.saturating_add(127), ", stringify!($SelfT), "::MAX);")]
. /// ```
. #[stable(feature = "rust1", since = "1.0.0")]
. #[must_use = "this returns the result of the operation, \
. without modifying the original"]
. #[rustc_const_stable(feature = "const_saturating_int_methods", since = "1.47.0")]
. #[inline(always)]
. pub const fn saturating_add(self, rhs: Self) -> Self {
146,118 ( 0.00%)  intrinsics::saturating_add(self, rhs)
. }
.
. /// Saturating addition with a signed integer. Computes `self + rhs`,
. /// saturating at the numeric bounds instead of overflowing.
. ///
. /// # Examples
. ///
. /// Basic usage:
-- line 1051 ----------------------------------------
-- line 1084 ----------------------------------------
. #[doc = concat!("assert_eq!(13", stringify!($SelfT), ".saturating_sub(127), 0);")]
. /// ```
. #[stable(feature = "rust1", since = "1.0.0")]
. #[must_use = "this returns the result of the operation, \
. without modifying the original"]
. #[rustc_const_stable(feature = "const_saturating_int_methods", since = "1.47.0")]
. #[inline(always)]
. pub const fn saturating_sub(self, rhs: Self) -> Self {
4,821 ( 0.00%)  intrinsics::saturating_sub(self, rhs)
. }
.
. /// Saturating integer multiplication. Computes `self * rhs`,
. /// saturating at the numeric bounds instead of overflowing.
. ///
. /// # Examples
. ///
. /// Basic usage:
-- line 1100 ----------------------------------------
-- line 1175 ----------------------------------------
. #[doc = concat!("assert_eq!(200", stringify!($SelfT), ".wrapping_add(", stringify!($SelfT), "::MAX), 199);")]
. /// ```
. #[stable(feature = "rust1", since = "1.0.0")]
. #[rustc_const_stable(feature = "const_wrapping_math", since = "1.32.0")]
. #[must_use = "this returns the result of the operation, \
. without modifying the original"]
. #[inline(always)]
. pub const fn wrapping_add(self, rhs: Self) -> Self {
4,093,344 ( 0.13%)  intrinsics::wrapping_add(self, rhs)
. }
.
. /// Wrapping (modular) addition with a signed integer. Computes
. /// `self + rhs`, wrapping around at the boundary of the type.
. ///
. /// # Examples
. ///
. /// Basic usage:
-- line 1191 ----------------------------------------
-- line 1217 ----------------------------------------
. #[doc = concat!("assert_eq!(100", stringify!($SelfT), ".wrapping_sub(", stringify!($SelfT), "::MAX), 101);")]
. /// ```
. #[stable(feature = "rust1", since = "1.0.0")]
. #[rustc_const_stable(feature = "const_wrapping_math", since = "1.32.0")]
. #[must_use = "this returns the result of the operation, \
. without modifying the original"]
. #[inline(always)]
. pub const fn wrapping_sub(self, rhs: Self) -> Self {
1,342,856 ( 0.04%)  intrinsics::wrapping_sub(self, rhs)
. }
.
. /// Wrapping (modular) multiplication. Computes `self *
. /// rhs`, wrapping around at the boundary of the type.
. ///
. /// # Examples
. ///
. /// Basic usage:
-- line 1233 ----------------------------------------
-- line 1240 ----------------------------------------
. /// assert_eq!(25u8.wrapping_mul(12), 44);
. /// ```
. #[stable(feature = "rust1", since = "1.0.0")]
. #[rustc_const_stable(feature = "const_wrapping_math", since = "1.32.0")]
. #[must_use = "this returns the result of the operation, \
. without modifying the original"]
. #[inline(always)]
. pub const fn wrapping_mul(self, rhs: Self) -> Self {
4,318,228 ( 0.13%)  intrinsics::wrapping_mul(self, rhs)
. }
.
. /// Wrapping (modular) division. Computes `self / rhs`.
. /// Wrapped division on unsigned types is just normal division.
. /// There's no way wrapping could ever happen.
. /// This function exists, so that all operations
. /// are accounted for in the wrapping operations.
. ///
-- line 1256 ----------------------------------------
-- line 1491 ----------------------------------------
. #[doc = concat!("assert_eq!(5", stringify!($SelfT), ".overflowing_add(2), (7, false));")]
. #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MAX.overflowing_add(1), (0, true));")]
. /// ```
. #[stable(feature = "wrapping", since = "1.7.0")]
. #[rustc_const_stable(feature = "const_wrapping_math", since = "1.32.0")]
. #[must_use = "this returns the result of the operation, \
. without modifying the original"]
. #[inline(always)]
10 ( 0.00%)  pub const fn overflowing_add(self, rhs: Self) -> (Self, bool) {
591,665 ( 0.02%)  let (a, b) = intrinsics::add_with_overflow(self as $ActualT, rhs as $ActualT);
. (a as Self, b)
20 ( 0.00%)  }
.
. /// Calculates `self + rhs + carry` without the ability to overflow.
. ///
. /// Performs "ternary addition" which takes in an extra bit to add, and may return an
. /// additional bit of overflow. This allows for chaining together multiple additions
. /// to create "big integers" which represent larger values.
. ///
. #[doc = concat!("This can be thought of as a ", stringify!($BITS), "-bit \"full adder\", in the electronics sense.")]
-- line 1510 ----------------------------------------
-- line 1587 ----------------------------------------
. #[doc = concat!("assert_eq!(5", stringify!($SelfT), ".overflowing_sub(2), (3, false));")]
. #[doc = concat!("assert_eq!(0", stringify!($SelfT), ".overflowing_sub(1), (", stringify!($SelfT), "::MAX, true));")]
. /// ```
. #[stable(feature = "wrapping", since = "1.7.0")]
. #[rustc_const_stable(feature = "const_wrapping_math", since = "1.32.0")]
. #[must_use = "this returns the result of the operation, \
. without modifying the original"]
. #[inline(always)]
6 ( 0.00%)  pub const fn overflowing_sub(self, rhs: Self) -> (Self, bool) {
122,130 ( 0.00%)  let (a, b) = intrinsics::sub_with_overflow(self as $ActualT, rhs as $ActualT);
. (a as Self, b)
12 ( 0.00%)  }
.
. /// Calculates `self - rhs - borrow` without the ability to overflow.
. ///
. /// Performs "ternary subtraction" which takes in an extra bit to subtract, and may return
. /// an additional bit of overflow. This allows for chaining together multiple subtractions
. /// to create "big integers" which represent larger values.
. ///
. /// # Examples
-- line 1606 ----------------------------------------
-- line 1674 ----------------------------------------
. /// assert_eq!(1_000_000_000u32.overflowing_mul(10), (1410065408, true));
. /// ```
. #[stable(feature = "wrapping", since = "1.7.0")]
. #[rustc_const_stable(feature = "const_wrapping_math", since = "1.32.0")]
. #[must_use = "this returns the result of the operation, \
. without modifying the original"]
. #[inline(always)]
. pub const fn overflowing_mul(self, rhs: Self) -> (Self, bool) {
1,180,135 ( 0.04%)  let (a, b) = intrinsics::mul_with_overflow(self as $ActualT, rhs as $ActualT);
. (a as Self, b)
. }
.
. /// Calculates the divisor when `self` is divided by `rhs`.
. ///
. /// Returns a tuple of the divisor along with a boolean indicating
. /// whether an arithmetic overflow would occur. Note that for unsigned
. /// integers overflow never occurs, so the second value is always
-- line 1690 ----------------------------------------
-- line 2132 ----------------------------------------
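The doc comments above describe chaining the overflow flag through multiple additions to build "big integers". A minimal standalone sketch of that chaining on two u64 limbs (little-endian); the helper name `add128` is illustrative, not from the profiled code:

// Add two 128-bit values stored as two u64 limbs, propagating the carry
// from the low limb into the high limb via the overflow flags.
fn add128(a: [u64; 2], b: [u64; 2]) -> ([u64; 2], bool) {
    let (lo, carry1) = a[0].overflowing_add(b[0]);
    let (hi, carry2) = a[1].overflowing_add(b[1]);
    let (hi, carry3) = hi.overflowing_add(carry1 as u64);
    ([lo, hi], carry2 || carry3)
}

fn main() {
    // (2^64 - 1) + 1 = 2^64: the low limb wraps to 0 and the carry
    // moves into the high limb.
    assert_eq!(add128([u64::MAX, 0], [1, 0]), ([0, 1], false));
    // Overflow out of the top limb is reported in the bool.
    assert_eq!(add128([u64::MAX, u64::MAX], [1, 0]), ([0, 0], true));
}
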
. #[doc = concat!("assert!(16", stringify!($SelfT), ".is_power_of_two());")]
. #[doc = concat!("assert!(!10", stringify!($SelfT), ".is_power_of_two());")]
. /// ```
. #[must_use]
. #[stable(feature = "rust1", since = "1.0.0")]
. #[rustc_const_stable(feature = "const_is_power_of_two", since = "1.32.0")]
. #[inline(always)]
. pub const fn is_power_of_two(self) -> bool {
180 ( 0.00%)  self.count_ones() == 1
. }
.
. // Returns one less than next power of two.
. // (For 8u8 next power of two is 8u8 and for 6u8 it is 8u8)
. //
. // 8u8.one_less_than_next_power_of_two() == 7
. // 6u8.one_less_than_next_power_of_two() == 7
. //
. // This method cannot overflow, as in the `next_power_of_two`
. // overflow cases it instead ends up returning the maximum value
. // of the type, and can return 0 for 0.
. #[inline]
. #[rustc_const_stable(feature = "const_int_pow", since = "1.50.0")]
. const fn one_less_than_next_power_of_two(self) -> Self {
3,144 ( 0.00%)  if self <= 1 { return 0; }
.
12,355 ( 0.00%)  let p = self - 1;
. // SAFETY: Because `p > 0`, it cannot consist entirely of leading zeros.
. // That means the shift is always in-bounds, and some processors
. // (such as intel pre-haswell) have more efficient ctlz
. // intrinsics when the argument is non-zero.
36,974 ( 0.00%)  let z = unsafe { intrinsics::ctlz_nonzero(p) };
12,390 ( 0.00%)  <$SelfT>::MAX >> z
. }
.
. /// Returns the smallest power of two greater than or equal to `self`.
. ///
. /// When return value overflows (i.e., `self > (1 << (N-1))` for type
. /// `uN`), it panics in debug mode and the return value is wrapped to 0 in
. /// release mode (the only situation in which method can return 0).
. ///
-- line 2171 ----------------------------------------
-- line 2179 ----------------------------------------
. /// ```
. #[stable(feature = "rust1", since = "1.0.0")]
. #[rustc_const_stable(feature = "const_int_pow", since = "1.50.0")]
. #[must_use = "this returns the result of the operation, \
. without modifying the original"]
. #[inline]
. #[rustc_inherit_overflow_checks]
. pub const fn next_power_of_two(self) -> Self {
21,538 ( 0.00%)  self.one_less_than_next_power_of_two() + 1
. }
.
. /// Returns the smallest power of two greater than or equal to `n`. If
. /// the next power of two is greater than the type's maximum value,
. /// `None` is returned, otherwise the power of two is wrapped in `Some`.
. ///
. /// # Examples
. ///
-- line 2195 ----------------------------------------
84,309 ( 0.00%)
--------------------------------------------------------------------------------
-- Auto-annotated source: /usr/home/liquid/.cargo/registry/src/github.com-1ecc6299db9ec823/hashbrown-0.12.0/src/raw/mod.rs
--------------------------------------------------------------------------------
Ir

-- line 111 ----------------------------------------
. const EMPTY: u8 = 0b1111_1111;
.
. /// Control byte value for a deleted bucket.
. const DELETED: u8 = 0b1000_0000;
.
. /// Checks whether a control byte represents a full bucket (top bit is clear).
. #[inline]
. fn is_full(ctrl: u8) -> bool {
1,317,232 ( 0.04%)  ctrl & 0x80 == 0
. }
.
. /// Checks whether a control byte represents a special value (top bit is set).
. #[inline]
. fn is_special(ctrl: u8) -> bool {
. ctrl & 0x80 != 0
. }
.
. /// Checks whether a special control value is EMPTY (just check 1 bit).
. #[inline]
. fn special_is_empty(ctrl: u8) -> bool {
. debug_assert!(is_special(ctrl));
112,840 ( 0.00%)  ctrl & 0x01 != 0
. }
.
. /// Primary hash function, used to select the initial bucket to probe from.
. #[inline]
. #[allow(clippy::cast_possible_truncation)]
. fn h1(hash: u64) -> usize {
. // On 32-bit platforms we simply ignore the higher hash bits.
. hash as usize
-- line 140 ----------------------------------------
-- line 143 ----------------------------------------
. /// Secondary hash function, saved in the low 7 bits of the control byte.
. #[inline]
. #[allow(clippy::cast_possible_truncation)]
. fn h2(hash: u64) -> u8 {
. // Grab the top 7 bits of the hash. While the hash is normally a full 64-bit
. // value, some hash functions (such as FxHash) produce a usize result
. // instead, which means that the top 32 bits are 0 on 32-bit platforms.
. let hash_len = usize::min(mem::size_of::<usize>(), mem::size_of::<u64>());
17,870,976 ( 0.55%)  let top7 = hash >> (hash_len * 8 - 7);
. (top7 & 0x7f) as u8 // truncation
. }
.
. /// Probe sequence based on triangular numbers, which is guaranteed (since our
. /// table size is a power of two) to visit every group of elements exactly once.
. ///
. /// A triangular probe has us jump by 1 more group every time. So first we
. /// jump by 1 group (meaning we just continue our linear scan), then 2 groups
-- line 159 ----------------------------------------
-- line 170 ----------------------------------------
. #[inline]
. fn move_next(&mut self, bucket_mask: usize) {
. // We should have found an empty bucket by now and ended the probe.
. debug_assert!(
. self.stride <= bucket_mask,
. "Went past end of probe sequence"
. );
.
123,386 ( 0.00%)  self.stride += Group::WIDTH;
123,386 ( 0.00%)  self.pos += self.stride;
101,799 ( 0.00%)  self.pos &= bucket_mask;
. }
. }
.
. /// Returns the number of buckets needed to hold the given number of items,
. /// taking the maximum load factor into account.
. ///
. /// Returns `None` if an overflow occurs.
. // Workaround for emscripten bug emscripten-core/emscripten-fastcomp#258
. #[cfg_attr(target_os = "emscripten", inline(never))]
. #[cfg_attr(not(target_os = "emscripten"), inline)]
. fn capacity_to_buckets(cap: usize) -> Option<usize> {
. debug_assert_ne!(cap, 0);
.
. // For small tables we require at least 1 empty bucket so that lookups are
. // guaranteed to terminate if an element doesn't exist in the table.
106,424 ( 0.00%)  if cap < 8 {
. // We don't bother with a table size of 2 buckets since that can only
. // hold a single element. Instead we skip directly to a 4 bucket table
. // which can hold 3 elements.
234,370 ( 0.01%)  return Some(if cap < 4 { 4 } else { 8 });
. }
.
. // Otherwise require 1/8 buckets to be empty (87.5% load)
. //
. // Be careful when modifying this, calculate_layout relies on the
. // overflow check here.
38,028 ( 0.00%)  let adjusted_cap = cap.checked_mul(8)? / 7;
.
. // Any overflows will have been caught by the checked_mul. Also, any
. // rounding errors from the division above will be cleaned up by
. // next_power_of_two (which can't overflow because of the previous division).
. Some(adjusted_cap.next_power_of_two())
. }
.
. /// Returns the maximum effective capacity for the given bucket mask, taking
. /// the maximum load factor into account.
. #[inline]
. fn bucket_mask_to_capacity(bucket_mask: usize) -> usize {
269,408 ( 0.01%)  if bucket_mask < 8 {
. // For tables with 1/2/4/8 buckets, we always reserve one empty slot.
. // Keep in mind that the bucket mask is one less than the bucket count.
. bucket_mask
. } else {
. // For larger tables we reserve 12.5% of the slots as empty.
46,106 ( 0.00%)  ((bucket_mask + 1) / 8) * 7
. }
. }
.
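The two capacity helpers above encode hashbrown's 87.5% maximum load factor. A standalone re-statement of their logic (mirroring the excerpt; a sketch, not the hashbrown crate itself) makes the 7/8 round trip concrete:

fn capacity_to_buckets(cap: usize) -> Option<usize> {
    if cap < 8 {
        // Small tables keep at least one empty bucket so probes terminate.
        return Some(if cap < 4 { 4 } else { 8 });
    }
    let adjusted_cap = cap.checked_mul(8)? / 7;
    Some(adjusted_cap.next_power_of_two())
}

fn bucket_mask_to_capacity(bucket_mask: usize) -> usize {
    if bucket_mask < 8 {
        bucket_mask // tiny tables reserve one empty slot
    } else {
        ((bucket_mask + 1) / 8) * 7 // 12.5% of slots stay empty
    }
}

fn main() {
    // Asking for 100 elements rounds up to 128 buckets, of which
    // 112 (87.5%) are usable before the table must grow.
    let buckets = capacity_to_buckets(100).unwrap();
    assert_eq!(buckets, 128);
    assert_eq!(bucket_mask_to_capacity(buckets - 1), 112);
}
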
. /// Helper which allows the max calculation for ctrl_align to be statically computed for each T
. /// while keeping the rest of `calculate_layout_for` independent of `T`
. #[derive(Copy, Clone)]
. struct TableLayout {
. size: usize,
-- line 233 ----------------------------------------
-- line 246 ----------------------------------------
.
. #[inline]
. fn calculate_layout_for(self, buckets: usize) -> Option<(Layout, usize)> {
. debug_assert!(buckets.is_power_of_two());
.
. let TableLayout { size, ctrl_align } = self;
. // Manual layout calculation since Layout methods are not yet stable.
. let ctrl_offset =
220,266 ( 0.01%)  size.checked_mul(buckets)?.checked_add(ctrl_align - 1)? & !(ctrl_align - 1);
292,994 ( 0.01%)  let len = ctrl_offset.checked_add(buckets + Group::WIDTH)?;
.
. Some((
. unsafe { Layout::from_size_align_unchecked(len, ctrl_align) },
. ctrl_offset,
. ))
. }
. }
.
-- line 263 ----------------------------------------
-- line 337 ----------------------------------------
. }
. }
. #[cfg_attr(feature = "inline-more", inline)]
. pub unsafe fn drop(&self) {
. self.as_ptr().drop_in_place();
. }
. #[inline]
. pub unsafe fn read(&self) -> T {
372 ( 0.00%)  self.as_ptr().read()
. }
. #[inline]
. pub unsafe fn write(&self, val: T) {
. self.as_ptr().write(val);
. }
. #[inline]
. pub unsafe fn as_ref<'a>(&self) -> &'a T {
. &*self.as_ptr()
-- line 353 ----------------------------------------
-- line 422 ----------------------------------------
. /// Creates a new empty hash table without allocating any memory, using the
. /// given allocator.
. ///
. /// In effect this returns a table with exactly 1 bucket. However we can
. /// leave the data pointer dangling since that bucket is never written to
. /// due to our load factor forcing us to always have at least 1 free bucket.
. #[inline]
. pub fn new_in(alloc: A) -> Self {
1,677 ( 0.00%)  Self {
. table: RawTableInner::new_in(alloc),
. marker: PhantomData,
. }
. }
.
. /// Allocates a new hash table with the given number of buckets.
. ///
. /// The control bytes are left uninitialized.
-- line 438 ----------------------------------------
-- line 440 ----------------------------------------
. unsafe fn new_uninitialized(
. alloc: A,
. buckets: usize,
. fallibility: Fallibility,
. ) -> Result<Self, TryReserveError> {
. debug_assert!(buckets.is_power_of_two());
.
. Ok(Self {
45 ( 0.00%)  table: RawTableInner::new_uninitialized(
. alloc,
. TableLayout::new::<T>(),
. buckets,
. fallibility,
. )?,
. marker: PhantomData,
. })
. }
-- line 456 ----------------------------------------
-- line 458 ----------------------------------------
. /// Attempts to allocate a new hash table with at least enough capacity
. /// for inserting the given number of elements without reallocating.
. fn fallible_with_capacity(
. alloc: A,
. capacity: usize,
. fallibility: Fallibility,
. ) -> Result<Self, TryReserveError> {
. Ok(Self {
7,204 ( 0.00%)  table: RawTableInner::fallible_with_capacity(
. alloc,
. TableLayout::new::<T>(),
. capacity,
. fallibility,
. )?,
. marker: PhantomData,
. })
. }
-- line 474 ----------------------------------------
-- line 527 ----------------------------------------
. debug_assert_ne!(self.table.bucket_mask, 0);
. debug_assert!(index < self.buckets());
. Bucket::from_base_index(self.data_end(), index)
. }
.
. /// Erases an element from the table without dropping it.
. #[cfg_attr(feature = "inline-more", inline)]
. #[deprecated(since = "0.8.1", note = "use erase or remove instead")]
11,539 ( 0.00%)  pub unsafe fn erase_no_drop(&mut self, item: &Bucket<T>) {
11,539 ( 0.00%)  let index = self.bucket_index(item);
. self.table.erase(index);
23,078 ( 0.00%)  }
.
. /// Erases an element from the table, dropping it in place.
. #[cfg_attr(feature = "inline-more", inline)]
. #[allow(clippy::needless_pass_by_value)]
. #[allow(deprecated)]
. pub unsafe fn erase(&mut self, item: Bucket<T>) {
. // Erase the element from the table first since drop might panic.
4,074 ( 0.00%)  self.erase_no_drop(&item);
. item.drop();
. }
.
. /// Finds and erases an element from the table, dropping it in place.
. /// Returns true if an element was found.
. #[cfg(feature = "raw")]
. #[cfg_attr(feature = "inline-more", inline)]
. pub fn erase_entry(&mut self, hash: u64, eq: impl FnMut(&T) -> bool) -> bool {
-- line 554 ----------------------------------------
-- line 563 ----------------------------------------
. }
. }
.
. /// Removes an element from the table, returning it.
. #[cfg_attr(feature = "inline-more", inline)]
. #[allow(clippy::needless_pass_by_value)]
. #[allow(deprecated)]
. pub unsafe fn remove(&mut self, item: Bucket<T>) -> T {
19,004 ( 0.00%)  self.erase_no_drop(&item);
176 ( 0.00%)  item.read()
. }
.
. /// Finds and removes an element from the table, returning it.
. #[cfg_attr(feature = "inline-more", inline)]
504,212 ( 0.02%)  pub fn remove_entry(&mut self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option<T> {
. // Avoid `Option::map` because it bloats LLVM IR.
1,816 ( 0.00%)  match self.find(hash, eq) {
5,890 ( 0.00%)  Some(bucket) => Some(unsafe { self.remove(bucket) }),
93,733 ( 0.00%)  None => None,
. }
742,372 ( 0.02%)  }
.
. /// Marks all table buckets as empty without dropping their contents.
. #[cfg_attr(feature = "inline-more", inline)]
. pub fn clear_no_drop(&mut self) {
. self.table.clear_no_drop();
. }
.
. /// Removes all elements from the table without freeing the backing memory.
. #[cfg_attr(feature = "inline-more", inline)]
. pub fn clear(&mut self) {
. // Ensure that the table is reset even if one of the drops panic
. let mut self_ = guard(self, |self_| self_.clear_no_drop());
. unsafe {
1 ( 0.00%)  self_.drop_elements();
. }
. }
.
7 ( 0.00%)  unsafe fn drop_elements(&mut self) {
13,982 ( 0.00%)  if mem::needs_drop::<T>() && !self.is_empty() {
. for item in self.iter() {
. item.drop();
. }
. }
8 ( 0.00%)  }
.
. /// Shrinks the table to fit `max(self.len(), min_size)` elements.
. #[cfg_attr(feature = "inline-more", inline)]
. pub fn shrink_to(&mut self, min_size: usize, hasher: impl Fn(&T) -> u64) {
. // Calculate the minimal number of elements that we need to reserve
. // space for.
. let min_size = usize::max(self.table.items, min_size);
. if min_size == 0 {
-- line 615 ----------------------------------------
-- line 642 ----------------------------------------
. }
. }
. }
.
. /// Ensures that at least `additional` items can be inserted into the table
. /// without reallocation.
. #[cfg_attr(feature = "inline-more", inline)]
. pub fn reserve(&mut self, additional: usize, hasher: impl Fn(&T) -> u64) {
280,187 ( 0.01%)  if additional > self.table.growth_left {
. // Avoid `Result::unwrap_or_else` because it bloats LLVM IR.
177,666 ( 0.01%)  if self
. .reserve_rehash(additional, hasher, Fallibility::Infallible)
. .is_err()
. {
. unsafe { hint::unreachable_unchecked() }
. }
. }
. }
.
-- line 660 ----------------------------------------
-- line 671 ----------------------------------------
. } else {
. Ok(())
. }
. }
.
. /// Out-of-line slow path for `reserve` and `try_reserve`.
. #[cold]
. #[inline(never)]
429,545 ( 0.01%)  fn reserve_rehash(
. &mut self,
. additional: usize,
. hasher: impl Fn(&T) -> u64,
. fallibility: Fallibility,
. ) -> Result<(), TryReserveError> {
. unsafe {
. self.table.reserve_rehash_inner(
. additional,
-- line 687 ----------------------------------------
-- line 690 ----------------------------------------
. TableLayout::new::<T>(),
. if mem::needs_drop::<T>() {
. Some(mem::transmute(ptr::drop_in_place::<T> as unsafe fn(*mut T)))
. } else {
. None
. },
. )
. }
319,624 ( 0.01%)  }
.
. /// Allocates a new table of a different size and moves the contents of the
. /// current table into it.
. fn resize(
. &mut self,
. capacity: usize,
. hasher: impl Fn(&T) -> u64,
. fallibility: Fallibility,
-- line 706 ----------------------------------------
-- line 714 ----------------------------------------
. )
. }
. }
.
. /// Inserts a new element into the table, and returns its raw bucket.
. ///
. /// This does not check if the given element already exists in the table.
. #[cfg_attr(feature = "inline-more", inline)]
1,973,830 ( 0.06%)  pub fn insert(&mut self, hash: u64, value: T, hasher: impl Fn(&T) -> u64) -> Bucket<T> {
. unsafe {
. let mut index = self.table.find_insert_slot(hash);
.
. // We can avoid growing the table once we have reached our load
. // factor if we are replacing a tombstone. This works since the
. // number of EMPTY slots does not change in this case.
4,639 ( 0.00%)  let old_ctrl = *self.table.ctrl(index);
1,213,324 ( 0.04%)  if unlikely(self.table.growth_left == 0 && special_is_empty(old_ctrl)) {
. self.reserve(1, hasher);
. index = self.table.find_insert_slot(hash);
. }
.
. self.table.record_item_insert_at(index, old_ctrl, hash);
.
. let bucket = self.bucket(index);
4 ( 0.00%)  bucket.write(value);
. bucket
. }
1,451,244 ( 0.04%)  }
.
. /// Attempts to insert a new element without growing the table and return its raw bucket.
. ///
. /// Returns an `Err` containing the given element if inserting it would require growing the
. /// table.
. ///
. /// This does not check if the given element already exists in the table.
. #[cfg(feature = "raw")]
-- line 749 ----------------------------------------
-- line 760 ----------------------------------------
. }
. }
. }
.
. /// Inserts a new element into the table, and returns a mutable reference to it.
. ///
. /// This does not check if the given element already exists in the table.
. #[cfg_attr(feature = "inline-more", inline)]
307,656 ( 0.01%)  pub fn insert_entry(&mut self, hash: u64, value: T, hasher: impl Fn(&T) -> u64) -> &mut T {
99 ( 0.00%)  unsafe { self.insert(hash, value, hasher).as_mut() }
230,742 ( 0.01%)  }
.
. /// Inserts a new element into the table, without growing the table.
. ///
. /// There must be enough space in the table to insert the new element.
. ///
. /// This does not check if the given element already exists in the table.
. #[cfg_attr(feature = "inline-more", inline)]
. #[cfg(any(feature = "raw", feature = "rustc-internal-api"))]
849 ( 0.00%)  pub unsafe fn insert_no_grow(&mut self, hash: u64, value: T) -> Bucket<T> {
329,297 ( 0.01%)  let (index, old_ctrl) = self.table.prepare_insert_slot(hash);
4,430 ( 0.00%)  let bucket = self.table.bucket(index);
.
. // If we are replacing a DELETED entry then we don't need to update
. // the load counter.
676,727 ( 0.02%)  self.table.growth_left -= special_is_empty(old_ctrl) as usize;
.
. bucket.write(value);
537,363 ( 0.02%)  self.table.items += 1;
. bucket
1,498 ( 0.00%)  }
.
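The insert paths above hinge on the control-byte encoding from the top of this file: EMPTY and DELETED set the top bit, full slots store the top 7 bits of the hash (h2) with the top bit clear, and replacing a DELETED tombstone does not consume growth_left. A standalone sketch of that encoding (mirroring the annotated helpers; the hard-coded shift of 57 assumes a 64-bit platform):

const EMPTY: u8 = 0b1111_1111;
const DELETED: u8 = 0b1000_0000;

// Full buckets have the top bit clear.
fn is_full(ctrl: u8) -> bool {
    ctrl & 0x80 == 0
}

// Among special (top-bit-set) values, bit 0 distinguishes EMPTY from DELETED.
fn special_is_empty(ctrl: u8) -> bool {
    debug_assert!(ctrl & 0x80 != 0);
    ctrl & 0x01 != 0
}

// Top 7 bits of the hash, stored in the control byte of a full slot.
fn h2(hash: u64) -> u8 {
    ((hash >> 57) & 0x7f) as u8
}

fn main() {
    assert!(!is_full(EMPTY) && !is_full(DELETED));
    assert!(special_is_empty(EMPTY) && !special_is_empty(DELETED));
    // A full slot's control byte is just h2 of the element's hash.
    assert!(is_full(h2(0xdead_beef_dead_beef)));
    // Replacing a DELETED slot does not consume growth_left, which is why
    // the insert code checks `special_is_empty(old_ctrl)` before growing.
    assert_eq!(special_is_empty(DELETED) as usize, 0);
    assert_eq!(special_is_empty(EMPTY) as usize, 1);
}
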
. /// Temporarily removes a bucket, applying the given function to the removed
. /// element and optionally put back the returned value in the same bucket.
. ///
. /// Returns `true` if the bucket still contains an element
. ///
. /// This does not check if the given bucket is actually occupied.
. #[cfg_attr(feature = "inline-more", inline)]
-- line 798 ----------------------------------------
-- line 813 ----------------------------------------
. true
. } else {
. false
. }
. }
.
. /// Searches for an element in the table.
. #[inline]
31,516 ( 0.00%)  pub fn find(&self, hash: u64, mut eq: impl FnMut(&T) -> bool) -> Option<Bucket<T>> {
4,681 ( 0.00%)  let result = self.table.find_inner(hash, &mut |index| unsafe {
18,530 ( 0.00%)  eq(self.bucket(index).as_ref())
755 ( 0.00%)  });
.
. // Avoid `Option::map` because it bloats LLVM IR.
. match result {
383 ( 0.00%)  Some(index) => Some(unsafe { self.bucket(index) }),
. None => None,
. }
35,456 ( 0.00%)  }
.
. /// Gets a reference to an element in the table.
. #[inline]
. pub fn get(&self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option<&T> {
. // Avoid `Option::map` because it bloats LLVM IR.
14,204 ( 0.00%)  match self.find(hash, eq) {
. Some(bucket) => Some(unsafe { bucket.as_ref() }),
. None => None,
. }
. }
.
. /// Gets a mutable reference to an element in the table.
. #[inline]
1,505 ( 0.00%)  pub fn get_mut(&mut self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option<&mut T> {
. // Avoid `Option::map` because it bloats LLVM IR.
14,073 ( 0.00%)  match self.find(hash, eq) {
. Some(bucket) => Some(unsafe { bucket.as_mut() }),
. None => None,
. }
1,720 ( 0.00%)  }
.
. /// Attempts to get mutable references to `N` entries in the table at once.
. ///
. /// Returns an array of length `N` with the results of each query.
. ///
. /// At most one mutable reference will be returned to any entry. `None` will be returned if any
. /// of the hashes are duplicates. `None` will be returned if the hash is not found.
. ///
-- line 859 ----------------------------------------
-- line 920 ----------------------------------------
. #[inline]
. pub fn len(&self) -> usize {
. self.table.items
. }
.
. /// Returns `true` if the table contains no elements.
. #[inline]
. pub fn is_empty(&self) -> bool {
517,149 ( 0.02%)  self.len() == 0
. }
.
. /// Returns the number of buckets in the table.
. #[inline]
. pub fn buckets(&self) -> usize {
. self.table.bucket_mask + 1
. }
.
-- line 936 ----------------------------------------
-- line 938 ----------------------------------------
. /// the caller to ensure that the `RawTable` outlives the `RawIter`.
. /// Because we cannot make the `next` method unsafe on the `RawIter`
. /// struct, we have to make the `iter` method unsafe.
. #[inline]
. pub unsafe fn iter(&self) -> RawIter<T> {
. let data = Bucket::from_base_index(self.data_end(), 0);
. RawIter {
. iter: RawIterRange::new(self.table.ctrl.as_ptr(), data, self.table.buckets()),
88,973 ( 0.00%)  items: self.table.items,
. }
. }
.
. /// Returns an iterator over occupied buckets that could match a given hash.
. ///
. /// `RawTable` only stores 7 bits of the hash value, so this iterator may
. /// return items that have a hash value different than the one provided. You
. /// should always validate the returned values before using them.
-- line 954 ----------------------------------------
-- line 995 ----------------------------------------
. /// Iteration starts at the provided iterator's current location.
. ///
. /// It is up to the caller to ensure that the iterator is valid for this
. /// `RawTable` and covers all items that remain in the table.
. pub unsafe fn into_iter_from(self, iter: RawIter<T>) -> RawIntoIter<T, A> {
. debug_assert_eq!(iter.len(), self.len());
.
. let alloc = self.table.alloc.clone();
8,424 ( 0.00%)  let allocation = self.into_allocation();
6,318 ( 0.00%)  RawIntoIter {
10,530 ( 0.00%)  iter,
. allocation,
. marker: PhantomData,
. alloc,
. }
. }
.
. /// Converts the table into a raw allocation. The contents of the table
. /// should be dropped using a `RawIter` before freeing the allocation.
. #[cfg_attr(feature = "inline-more", inline)]
. pub(crate) fn into_allocation(self) -> Option<(NonNull<u8>, Layout)> {
4,321 ( 0.00%)  let alloc = if self.table.is_empty_singleton() {
. None
. } else {
. // Avoid `Option::unwrap_or_else` because it bloats LLVM IR.
. let (layout, ctrl_offset) = match calculate_layout::<T>(self.table.buckets()) {
. Some(lco) => lco,
. None => unsafe { hint::unreachable_unchecked() },
. };
. Some((
682 ( 0.00%)  unsafe { NonNull::new_unchecked(self.table.ctrl.as_ptr().sub(ctrl_offset)) },
. layout,
. ))
. };
. mem::forget(self);
. alloc
. }
. }
.
-- line 1033 ----------------------------------------
-- line 1042 ----------------------------------------
. T: Sync,
. A: Sync,
. {
. }
.
. impl RawTableInner {
. #[inline]
. const fn new_in(alloc: A) -> Self {
591,649 ( 0.02%)  Self {
. // Be careful to cast the entire slice to a raw pointer.
. ctrl: unsafe { NonNull::new_unchecked(Group::static_empty() as *const _ as *mut u8) },
. bucket_mask: 0,
. items: 0,
. growth_left: 0,
. alloc,
. }
. }
. }
.
. impl RawTableInner {
. #[cfg_attr(feature = "inline-more", inline)]
404,191 ( 0.01%)  unsafe fn new_uninitialized(
. alloc: A,
. table_layout: TableLayout,
. buckets: usize,
. fallibility: Fallibility,
. ) -> Result<Self, TryReserveError> {
. debug_assert!(buckets.is_power_of_two());
.
. // Avoid `Option::ok_or_else` because it bloats LLVM IR.
-- line 1071 ----------------------------------------
-- line 1078 ----------------------------------------
. // exceed `isize::MAX`. We can skip this check on 64-bit systems since
. // such allocations will never succeed anyways.
. //
. // This mirrors what Vec does in the standard library.
. if mem::size_of::<usize>() < 8 && layout.size() > isize::MAX as usize {
. return Err(fallibility.capacity_overflow());
. }
.
90,976 ( 0.00%)  let ptr: NonNull<u8> = match do_alloc(&alloc, layout) {
. Ok(block) => block.cast(),
. Err(_) => return Err(fallibility.alloc_err(layout)),
. };
.
. let ctrl = NonNull::new_unchecked(ptr.as_ptr().add(ctrl_offset));
223,302 ( 0.01%)  Ok(Self {
. ctrl,
90,438 ( 0.00%)  bucket_mask: buckets - 1,
. items: 0,
. growth_left: bucket_mask_to_capacity(buckets - 1),
. alloc,
. })
294,272 ( 0.01%)  }
.
. #[inline]
24,024 ( 0.00%)  fn fallible_with_capacity(
. alloc: A,
. table_layout: TableLayout,
. capacity: usize,
. fallibility: Fallibility,
. ) -> Result<Self, TryReserveError> {
6,366 ( 0.00%)  if capacity == 0 {
6,497 ( 0.00%)  Ok(Self::new_in(alloc))
. } else {
. unsafe {
. let buckets =
. capacity_to_buckets(capacity).ok_or_else(|| fallibility.capacity_overflow())?;
.
184,066 ( 0.01%)  let result = Self::new_uninitialized(alloc, table_layout, buckets, fallibility)?;
. result.ctrl(0).write_bytes(EMPTY, result.num_ctrl_bytes());
.
10,726 ( 0.00%)  Ok(result)
. }
. }
24,024 ( 0.00%)  }
.
. /// Searches for an empty or deleted bucket which is suitable for inserting
. /// a new element and sets the hash for that slot.
. ///
. /// There must be at least 1 empty bucket in the table.
. #[inline]
112,141 ( 0.00%)  unsafe fn prepare_insert_slot(&self, hash: u64) -> (usize, u8) {
. let index = self.find_insert_slot(hash);
112,141 ( 0.00%)  let old_ctrl = *self.ctrl(index);
. self.set_ctrl_h2(index, hash);
. (index, old_ctrl)
224,282 ( 0.01%)  }
.
. /// Searches for an empty or deleted bucket which is suitable for inserting
. /// a new element.
. ///
. /// There must be at least 1 empty bucket in the table.
. #[inline]
. fn find_insert_slot(&self, hash: u64) -> usize {
. let mut probe_seq = self.probe_seq(hash);
. loop {
. unsafe {
. let group = Group::load(self.ctrl(probe_seq.pos));
855,410 ( 0.03%)  if let Some(bit) = group.match_empty_or_deleted().lowest_set_bit() {
2,514,830 ( 0.08%)  let result = (probe_seq.pos + bit) & self.bucket_mask;
.
. // In tables smaller than the group width, trailing control
. // bytes outside the range of the table are filled with
. // EMPTY entries. These will unfortunately trigger a
. // match, but once masked may point to a full bucket that
. // is already occupied. We detect this situation here and
. // perform a second scan starting at the beginning of the
. // table. This second scan is guaranteed to find an empty
. // slot (due to the load factor) before hitting the trailing
. // control bytes (containing EMPTY).
1,141,192 ( 0.04%)  if unlikely(is_full(*self.ctrl(result))) {
. debug_assert!(self.bucket_mask < Group::WIDTH);
. debug_assert_ne!(probe_seq.pos, 0);
. return Group::load_aligned(self.ctrl(0))
. .match_empty_or_deleted()
. .lowest_set_bit_nonzero();
. }
.
. return result;
-- line 1165 ----------------------------------------
-- line 1171 ----------------------------------------
.
. /// Searches for an element in the table. This uses dynamic dispatch to reduce the amount of
. /// code generated, but it is eliminated by LLVM optimizations.
. #[inline]
. fn find_inner(&self, hash: u64, eq: &mut dyn FnMut(usize) -> bool) -> Option<usize> {
. let h2_hash = h2(hash);
. let mut probe_seq = self.probe_seq(hash);
.
67,240 ( 0.00%)  loop {
. let group = unsafe { Group::load(self.ctrl(probe_seq.pos)) };
.
2,276,204 ( 0.07%)  for bit in group.match_byte(h2_hash) {
4,229,487 ( 0.13%)  let index = (probe_seq.pos + bit) & self.bucket_mask;
.
3,140,848 ( 0.10%)  if likely(eq(index)) {
. return Some(index);
. }
. }
.
756,567 ( 0.02%)  if likely(group.match_empty().any_bit_set()) {
. return None;
. }
.
. probe_seq.move_next(self.bucket_mask);
. }
. }
.
. #[allow(clippy::mut_mut)]
-- line 1198 ----------------------------------------
-- line 1225 ----------------------------------------
. Bucket::from_base_index(self.data_end(), index)
. }
.
. #[inline]
. unsafe fn bucket_ptr(&self, index: usize, size_of: usize) -> *mut u8 {
. debug_assert_ne!(self.bucket_mask, 0);
. debug_assert!(index < self.buckets());
. let base: *mut u8 = self.data_end().as_ptr();
3,049,984 ( 0.09%)  base.sub((index + 1) * size_of)
. }
.
. #[inline]
. unsafe fn data_end(&self) -> NonNull<u8> {
. NonNull::new_unchecked(self.ctrl.as_ptr().cast())
. }
.
. /// Returns an iterator-like object for a probe sequence on the table.
. ///
. /// This iterator never terminates, but is guaranteed to visit each bucket
. /// group exactly once. The loop using `probe_seq` must terminate upon
. /// reaching a group containing an empty bucket.
. #[inline]
. fn probe_seq(&self, hash: u64) -> ProbeSeq {
. ProbeSeq {
13,950,625 ( 0.43%)  pos: h1(hash) & self.bucket_mask,
. stride: 0,
. }
. }
.
. /// Returns the index of a bucket for which a value must be inserted if there is enough room
/// in the table, otherwise returns error . #[cfg(feature = "raw")] . #[inline] -- line 1257 ---------------------------------------- -- line 1263 ---------------------------------------- . } else { . self.record_item_insert_at(index, old_ctrl, hash); . Ok(index) . } . } . . #[inline] . unsafe fn record_item_insert_at(&mut self, index: usize, old_ctrl: u8, hash: u64) { 1,516,593 ( 0.05%) self.growth_left -= special_is_empty(old_ctrl) as usize; . self.set_ctrl_h2(index, hash); 1,213,240 ( 0.04%) self.items += 1; . } . . #[inline] . fn is_in_same_group(&self, i: usize, new_i: usize, hash: u64) -> bool { . let probe_seq_pos = self.probe_seq(hash).pos; . let probe_index = . |pos: usize| (pos.wrapping_sub(probe_seq_pos) & self.bucket_mask) / Group::WIDTH; . probe_index(i) == probe_index(new_i) -- line 1281 ---------------------------------------- -- line 1312 ---------------------------------------- . // replicate the buckets at the end of the trailing group. For example . // with 2 buckets and a group size of 4, the control bytes will look . // like this: . // . // Real | Replicated . // --------------------------------------------- . // | [A] | [B] | [EMPTY] | [EMPTY] | [A] | [B] | . // --------------------------------------------- 2,393,550 ( 0.07%) let index2 = ((index.wrapping_sub(Group::WIDTH)) & self.bucket_mask) + Group::WIDTH; . 797,398 ( 0.02%) *self.ctrl(index) = ctrl; 797,614 ( 0.02%) *self.ctrl(index2) = ctrl; . } . . /// Returns a pointer to a control byte. . #[inline] . unsafe fn ctrl(&self, index: usize) -> *mut u8 { . debug_assert!(index < self.num_ctrl_bytes()); . self.ctrl.as_ptr().add(index) . } . . #[inline] . fn buckets(&self) -> usize { 266,201 ( 0.01%) self.bucket_mask + 1 . } . . #[inline] . fn num_ctrl_bytes(&self) -> usize { 205,942 ( 0.01%) self.bucket_mask + 1 + Group::WIDTH . } . . #[inline] . fn is_empty_singleton(&self) -> bool { 1,107,963 ( 0.03%) self.bucket_mask == 0 . } . . #[allow(clippy::mut_mut)] . #[inline] . unsafe fn prepare_resize( . &self, . table_layout: TableLayout, . capacity: usize, . fallibility: Fallibility, . ) -> Result, TryReserveError> { . debug_assert!(self.items <= capacity); . . // Allocate and initialize the new table. 3,786 ( 0.00%) let mut new_table = RawTableInner::fallible_with_capacity( . self.alloc.clone(), . table_layout, . capacity, . fallibility, . )?; 112,176 ( 0.00%) new_table.growth_left -= self.items; . new_table.items = self.items; . . // The hash function may panic, in which case we simply free the new . // table without dropping any elements that may have been copied into . // it. . // . // This guard is also used to free the old table on success, see . // the comment at the bottom of this function. . Ok(guard(new_table, move |self_| { 52,686 ( 0.00%) if !self_.is_empty_singleton() { . self_.free_buckets(table_layout); . } . })) . } . . /// Reserves or rehashes to make room for `additional` more elements. . /// . /// This uses dynamic dispatch to reduce the amount of -- line 1383 ---------------------------------------- -- line 1388 ---------------------------------------- . &mut self, . additional: usize, . hasher: &dyn Fn(&mut Self, usize) -> u64, . fallibility: Fallibility, . layout: TableLayout, . drop: Option, . ) -> Result<(), TryReserveError> { . // Avoid `Option::ok_or_else` because it bloats LLVM IR. 105,377 ( 0.00%) let new_items = match self.items.checked_add(additional) { . Some(new_items) => new_items, . None => return Err(fallibility.capacity_overflow()), . 
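--------------------------------------------------------------------------------
Note: the `set_ctrl` code above writes every control byte twice, mirroring the
first `Group::WIDTH` bytes into the replicated region past the end of the
table so that unaligned group loads near the wrap-around point always see
consistent data. A standalone sketch of just that index computation, assuming
a group width of 4 to match the diagram in the listing:

// Mirror index from `set_ctrl`: for index < GROUP_WIDTH this lands in the
// trailing replicated bytes; for any other index it maps onto itself.
const GROUP_WIDTH: usize = 4; // assumed; real builds use 8 or 16 (SSE2)

fn mirror_index(index: usize, bucket_mask: usize) -> usize {
    (index.wrapping_sub(GROUP_WIDTH) & bucket_mask) + GROUP_WIDTH
}

fn main() {
    // Two-bucket table, as in the "Real | Replicated" diagram above.
    let bucket_mask = 1;
    for index in 0..2 {
        println!("ctrl[{index}] is replicated at ctrl[{}]",
                 mirror_index(index, bucket_mask));
    }
    // In a table bigger than one group, interior bytes map onto themselves.
    assert_eq!(mirror_index(5, 15), 5);
}
--------------------------------------------------------------------------------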
}; 105,372 ( 0.00%) let full_capacity = bucket_mask_to_capacity(self.bucket_mask); 218,986 ( 0.01%) if new_items <= full_capacity / 2 { . // Rehash in-place without re-allocating if we have plenty of spare . // capacity that is locked up due to DELETED entries. . self.rehash_in_place(hasher, layout.size, drop); . Ok(()) . } else { . // Otherwise, conservatively resize to at least the next size up . // to avoid churning deletes into frequent rehashes. . self.resize_inner( 52,686 ( 0.00%) usize::max(new_items, full_capacity + 1), . hasher, . fallibility, . layout, . ) . } . } . . /// Allocates a new table of a different size and moves the contents of the -- line 1418 ---------------------------------------- -- line 1424 ---------------------------------------- . #[inline(always)] . unsafe fn resize_inner( . &mut self, . capacity: usize, . hasher: &dyn Fn(&mut Self, usize) -> u64, . fallibility: Fallibility, . layout: TableLayout, . ) -> Result<(), TryReserveError> { 9,816 ( 0.00%) let mut new_table = self.prepare_resize(layout, capacity, fallibility)?; . . // Copy all elements to the new table. . for i in 0..self.buckets() { 479,350 ( 0.01%) if !is_full(*self.ctrl(i)) { . continue; . } . . // This may panic. . let hash = hasher(self, i); . . // We can use a simpler version of insert() here since: . // - there are no DELETED entries. -- line 1444 ---------------------------------------- -- line 1454 ---------------------------------------- . } . . // We successfully copied all elements without panicking. Now replace . // self with the new table. The old table will have its memory freed but . // the items will not be dropped (since they have been moved into the . // new table). . mem::swap(self, &mut new_table); . 52,686 ( 0.00%) Ok(()) . } . . /// Rehashes the contents of the table in place (i.e. without changing the . /// allocation). . /// . /// If `hasher` panics then some of the table's contents may be lost. . /// . /// This uses dynamic dispatch to reduce the amount of -- line 1470 ---------------------------------------- -- line 1554 ---------------------------------------- . #[inline] . unsafe fn free_buckets(&mut self, table_layout: TableLayout) { . // Avoid `Option::unwrap_or_else` because it bloats LLVM IR. . let (layout, ctrl_offset) = match table_layout.calculate_layout_for(self.buckets()) { . Some(lco) => lco, . None => hint::unreachable_unchecked(), . }; . self.alloc.deallocate( 35,854 ( 0.00%) NonNull::new_unchecked(self.ctrl.as_ptr().sub(ctrl_offset)), . layout, . ); . } . . /// Marks all table buckets as empty without dropping their contents. . #[inline] . fn clear_no_drop(&mut self) { 6,119 ( 0.00%) if !self.is_empty_singleton() { . unsafe { . self.ctrl(0).write_bytes(EMPTY, self.num_ctrl_bytes()); . } . } 9,408 ( 0.00%) self.items = 0; 6,156 ( 0.00%) self.growth_left = bucket_mask_to_capacity(self.bucket_mask); . } . . #[inline] . unsafe fn erase(&mut self, index: usize) { . debug_assert!(is_full(*self.ctrl(index))); 100,247 ( 0.00%) let index_before = index.wrapping_sub(Group::WIDTH) & self.bucket_mask; . let empty_before = Group::load(self.ctrl(index_before)).match_empty(); . let empty_after = Group::load(self.ctrl(index)).match_empty(); . . // If we are inside a continuous block of Group::WIDTH full or deleted . // cells then a probe window may have seen a full block when trying to . // insert. We therefore need to keep that block non-empty so that . // lookups will continue searching to the next probe window. . // .
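--------------------------------------------------------------------------------
Note: `reserve_rehash` above picks between two strategies: if the table would
still be at no more than half of its full capacity after the insertions, the
spare room is locked up in DELETED slots and an in-place rehash reclaims it;
otherwise it resizes to at least `full_capacity + 1`, which bumps the
power-of-two bucket count. A decision-only sketch of that policy (names are
hypothetical, the table itself is omitted):

enum Strategy {
    RehashInPlace,
    Resize { new_capacity: usize },
}

// Returns None on capacity overflow, mirroring `fallibility.capacity_overflow()`.
fn plan_reserve(items: usize, additional: usize, full_capacity: usize) -> Option<Strategy> {
    let new_items = items.checked_add(additional)?;
    if new_items <= full_capacity / 2 {
        Some(Strategy::RehashInPlace)
    } else {
        Some(Strategy::Resize {
            new_capacity: usize::max(new_items, full_capacity + 1),
        })
    }
}

fn main() {
    // 28 usable slots (32 buckets at 7/8 load), 20 live items, 5 more wanted:
    match plan_reserve(20, 5, 28) {
        Some(Strategy::RehashInPlace) => println!("rehash in place"),
        Some(Strategy::Resize { new_capacity }) => println!("resize to {new_capacity}"),
        None => println!("capacity overflow"),
    }
}
--------------------------------------------------------------------------------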
// Note that in this context `leading_zeros` refers to the bytes at the . // end of a group, while `trailing_zeros` refers to the bytes at the . // beginning of a group. 400,988 ( 0.01%) let ctrl = if empty_before.leading_zeros() + empty_after.trailing_zeros() >= Group::WIDTH { . DELETED . } else { 498,755 ( 0.02%) self.growth_left += 1; . EMPTY . }; . self.set_ctrl(index, ctrl); 400,988 ( 0.01%) self.items -= 1; . } . } . . impl Clone for RawTable { 4,488 ( 0.00%) fn clone(&self) -> Self { 568 ( 0.00%) if self.table.is_empty_singleton() { . Self::new_in(self.table.alloc.clone()) . } else { . unsafe { . let mut new_table = ManuallyDrop::new( . // Avoid `Result::ok_or_else` because it bloats LLVM IR. . match Self::new_uninitialized( . self.table.alloc.clone(), . self.table.buckets(), -- line 1615 ---------------------------------------- -- line 1624 ---------------------------------------- . // We need to free the memory allocated for the new table. . new_table.free_buckets(); . }); . . // Return the newly created table. . ManuallyDrop::into_inner(new_table) . } . } 5,049 ( 0.00%) } . . fn clone_from(&mut self, source: &Self) { . if source.table.is_empty_singleton() { . *self = Self::new_in(self.table.alloc.clone()); . } else { . unsafe { . // First, drop all our elements without clearing the control bytes. . self.drop_elements(); -- line 1640 ---------------------------------------- -- line 1687 ---------------------------------------- . .table . .ctrl(0) . .copy_to_nonoverlapping(self.table.ctrl(0), self.table.num_ctrl_bytes()); . source . .data_start() . .copy_to_nonoverlapping(self.data_start(), self.table.buckets()); . . self.table.items = source.table.items; 18 ( 0.00%) self.table.growth_left = source.table.growth_left; . } . } . . impl RawTable { . /// Common code for clone and clone_from. Assumes `self.buckets() == source.buckets()`. . #[cfg_attr(feature = "inline-more", inline)] . unsafe fn clone_from_impl(&mut self, source: &Self, mut on_panic: impl FnMut(&mut Self)) { . // Copy the control bytes unchanged. We do this in a single pass -- line 1703 ---------------------------------------- -- line 1790 ---------------------------------------- . fn default() -> Self { . Self::new_in(Default::default()) . } . } . . #[cfg(feature = "nightly")] . unsafe impl<#[may_dangle] T, A: Allocator + Clone> Drop for RawTable { . #[cfg_attr(feature = "inline-more", inline)] 644,689 ( 0.02%) fn drop(&mut self) { 482,902 ( 0.01%) if !self.table.is_empty_singleton() { . unsafe { . self.drop_elements(); . self.free_buckets(); . } . } 691,408 ( 0.02%) } . } . #[cfg(not(feature = "nightly"))] . impl Drop for RawTable { . #[cfg_attr(feature = "inline-more", inline)] . fn drop(&mut self) { . if !self.table.is_empty_singleton() { . unsafe { . self.drop_elements(); -- line 1813 ---------------------------------------- -- line 1817 ---------------------------------------- . } . } . . impl IntoIterator for RawTable { . type Item = T; . type IntoIter = RawIntoIter; . . #[cfg_attr(feature = "inline-more", inline)] 8,424 ( 0.00%) fn into_iter(self) -> RawIntoIter { . unsafe { . let iter = self.iter(); . self.into_iter_from(iter) . } 10,530 ( 0.00%) } . } . . /// Iterator over a sub-range of a table. Unlike `RawIter` this iterator does . /// not track an item count. . pub(crate) struct RawIterRange { . // Mask of full buckets in the current group. Bits are cleared from this . // mask as each element is processed. . 
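--------------------------------------------------------------------------------
Note: the EMPTY-versus-DELETED choice in `erase` above hinges on whether some
probe window of `Group::WIDTH` consecutive slots spanning the erased index
could ever have appeared completely full; only then must a tombstone be left
behind so later probes keep searching past it. A toy model of that test over
plain byte arrays, assuming a group width of 4 (the real code works on SIMD
bitmasks, where `leading_zeros` counts full slots at the end of the group and
`trailing_zeros` counts them at the start):

const WIDTH: usize = 4; // assumed group width
const EMPTY: u8 = 0xFF; // control byte for an empty slot

// Run of non-EMPTY bytes ending at the group's end.
fn full_run_at_end(group: &[u8; WIDTH]) -> usize {
    group.iter().rev().take_while(|&&b| b != EMPTY).count()
}

// Run of non-EMPTY bytes starting at the group's beginning.
fn full_run_at_start(group: &[u8; WIDTH]) -> usize {
    group.iter().take_while(|&&b| b != EMPTY).count()
}

// `before` ends just in front of the erased slot, `after` starts at it.
fn must_tombstone(before: &[u8; WIDTH], after: &[u8; WIDTH]) -> bool {
    full_run_at_end(before) + full_run_at_start(after) >= WIDTH
}

fn main() {
    let full = 0x2a; // any byte with the top bit clear means "full"
    // Erased slot sits inside a long run of full slots: leave DELETED.
    assert!(must_tombstone(&[full; WIDTH], &[full; WIDTH]));
    // An EMPTY nearby proves no window ever looked completely full.
    assert!(!must_tombstone(&[full, EMPTY, full, full],
                            &[full, EMPTY, full, full]));
    println!("ok");
}
--------------------------------------------------------------------------------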
current_group: BitMask, -- line 1838 ---------------------------------------- -- line 1934 ---------------------------------------- . . impl Iterator for RawIterRange { . type Item = Bucket; . . #[cfg_attr(feature = "inline-more", inline)] . fn next(&mut self) -> Option> { . unsafe { . loop { 178,411 ( 0.01%) if let Some(index) = self.current_group.lowest_set_bit() { 27,075 ( 0.00%) self.current_group = self.current_group.remove_lowest_bit(); 46,234 ( 0.00%) return Some(self.data.next_n(index)); . } . 166,045 ( 0.01%) if self.next_ctrl >= self.end { . return None; . } . . // We might read past self.end up to the next group boundary, . // but this is fine because it only occurs on tables smaller . // than the group size where the trailing control bytes are all . // EMPTY. On larger tables self.end is guaranteed to be aligned . // to the group size (since tables are power-of-two sized). 4,070 ( 0.00%) self.current_group = Group::load_aligned(self.next_ctrl).match_full(); 3,339 ( 0.00%) self.data = self.data.next_n(Group::WIDTH); 7,826 ( 0.00%) self.next_ctrl = self.next_ctrl.add(Group::WIDTH); . } . } . } . . #[inline] . fn size_hint(&self) -> (usize, Option) { . // We don't have an item count, so just guess based on the range size. . ( -- line 1966 ---------------------------------------- -- line 2102 ---------------------------------------- . } . } else { . // We must have already iterated past the removed item. . } . } . } . . unsafe fn drop_elements(&mut self) { 1,793 ( 0.00%) if mem::needs_drop::() && self.len() != 0 { . for item in self { . item.drop(); . } . } . } . } . . impl Clone for RawIter { -- line 2118 ---------------------------------------- -- line 2124 ---------------------------------------- . } . } . } . . impl Iterator for RawIter { . type Item = Bucket; . . #[cfg_attr(feature = "inline-more", inline)] 32,814 ( 0.00%) fn next(&mut self) -> Option> { 77,218 ( 0.00%) if let Some(b) = self.iter.next() { 205,604 ( 0.01%) self.items -= 1; . Some(b) . } else { . // We don't check against items == 0 here to allow the . // compiler to optimize away the item count entirely if the . // iterator length is never queried. . debug_assert_eq!(self.items, 0); . None . } 65,628 ( 0.00%) } . . #[inline] . fn size_hint(&self) -> (usize, Option) { . (self.items, Some(self.items)) . } . } . . impl ExactSizeIterator for RawIter {} -- line 2151 ---------------------------------------- -- line 2177 ---------------------------------------- . T: Sync, . A: Sync, . { . } . . #[cfg(feature = "nightly")] . unsafe impl<#[may_dangle] T, A: Allocator + Clone> Drop for RawIntoIter { . #[cfg_attr(feature = "inline-more", inline)] 3,290 ( 0.00%) fn drop(&mut self) { . unsafe { . // Drop all remaining elements . self.iter.drop_elements(); . . // Free the table 20,518 ( 0.00%) if let Some((ptr, layout)) = self.allocation { . self.alloc.deallocate(ptr, layout); . } . } 302 ( 0.00%) } . } . #[cfg(not(feature = "nightly"))] . impl Drop for RawIntoIter { . #[cfg_attr(feature = "inline-more", inline)] . fn drop(&mut self) { . unsafe { . // Drop all remaining elements . self.iter.drop_elements(); -- line 2203 ---------------------------------------- -- line 2209 ---------------------------------------- . } . } . } . . impl Iterator for RawIntoIter { . type Item = T; . . #[cfg_attr(feature = "inline-more", inline)] 1,905 ( 0.00%) fn next(&mut self) -> Option { 1,035 ( 0.00%) unsafe { Some(self.iter.next()?.read()) } 4,664 ( 0.00%) } . . #[inline] . 
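--------------------------------------------------------------------------------
Note: `RawIterRange::next` above iterates one group per outer step: load
`Group::WIDTH` control bytes, turn them into a bitmask of full slots, then
peel off one set bit per call. A dependency-free sketch of the same pattern,
with a plain `u32` standing in for the SIMD bitmask:

// One "full slots" bitmask per group; yields absolute slot indices.
struct ToyIter<'a> {
    groups: &'a [u32],
    current: u32,      // bits still to yield from the current group
    next_group: usize, // index of the next group to load
    width: usize,      // slots per group
}

impl<'a> Iterator for ToyIter<'a> {
    type Item = usize;
    fn next(&mut self) -> Option<usize> {
        loop {
            if self.current != 0 {
                let bit = self.current.trailing_zeros() as usize;
                self.current &= self.current - 1; // remove lowest set bit
                return Some((self.next_group - 1) * self.width + bit);
            }
            // Load the next group's mask, or stop at the end of the range.
            self.current = *self.groups.get(self.next_group)?;
            self.next_group += 1;
        }
    }
}

fn main() {
    let groups = [0b1010_u32, 0b1_0000_0001];
    let iter = ToyIter { groups: &groups, current: 0, next_group: 0, width: 16 };
    assert_eq!(iter.collect::<Vec<_>>(), vec![1, 3, 16, 24]);
    println!("ok");
}
--------------------------------------------------------------------------------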
fn size_hint(&self) -> (usize, Option) { 4 ( 0.00%) self.iter.size_hint() . } . } . . impl ExactSizeIterator for RawIntoIter {} . impl FusedIterator for RawIntoIter {} . . /// Iterator which consumes elements without freeing the table storage. . pub struct RawDrain<'a, T, A: Allocator + Clone = Global> { -- line 2231 ---------------------------------------- -- line 2259 ---------------------------------------- . where . T: Sync, . A: Sync, . { . } . . impl Drop for RawDrain<'_, T, A> { . #[cfg_attr(feature = "inline-more", inline)] 1,304 ( 0.00%) fn drop(&mut self) { . unsafe { . // Drop all remaining elements. Note that this may panic. . self.iter.drop_elements(); . . // Reset the contents of the table now that all elements have been . // dropped. . self.table.clear_no_drop(); . . // Move the now empty table back to its original location. 163 ( 0.00%) self.orig_table . .as_ptr() . .copy_from_nonoverlapping(&*self.table, 1); . } 1,304 ( 0.00%) } . } . . impl Iterator for RawDrain<'_, T, A> { . type Item = T; . . #[cfg_attr(feature = "inline-more", inline)] . fn next(&mut self) -> Option { . unsafe { -- line 2289 ---------------------------------------- 7,765,448 ( 0.24%) -------------------------------------------------------------------------------- -- Auto-annotated source: /usr/home/liquid/rust/worktree-benchmarking/compiler/rustc_data_structures/src/sip128.rs -------------------------------------------------------------------------------- Ir -- line 91 ---------------------------------------- . // maximum of number bytes needed to fill an 8-byte-sized element on which . // SipHash operates. Note that for variable-sized copies which are known to be . // less than 8 bytes, this function will perform more work than necessary unless . // the compiler is able to optimize the extra work away. . #[inline] . unsafe fn copy_nonoverlapping_small(src: *const u8, dst: *mut u8, count: usize) { . debug_assert!(count <= 8); . 34,415 ( 0.00%) if count == 8 { . ptr::copy_nonoverlapping(src, dst, 8); . return; . } . . let mut i = 0; 41,028 ( 0.00%) if i + 3 < count { . ptr::copy_nonoverlapping(src.add(i), dst.add(i), 4); . i += 4; . } . 88,256 ( 0.00%) if i + 1 < count { . ptr::copy_nonoverlapping(src.add(i), dst.add(i), 2); 11,136 ( 0.00%) i += 2 . } . 41,028 ( 0.00%) if i < count { 23,437 ( 0.00%) *dst.add(i) = *src.add(i); . i += 1; . } . . debug_assert_eq!(i, count); . } . . // # Implementation . // -- line 124 ---------------------------------------- -- line 201 ---------------------------------------- . . hasher . } . . // A specialized write function for values with size <= 8. . #[inline] . fn short_write(&mut self, x: T) { . let size = mem::size_of::(); 432,335 ( 0.01%) let nbuf = self.nbuf; . debug_assert!(size <= 8); . debug_assert!(nbuf < BUFFER_SIZE); . debug_assert!(nbuf + size < BUFFER_WITH_SPILL_SIZE); . 2,924,076 ( 0.09%) if nbuf + size < BUFFER_SIZE { . unsafe { . // The memcpy call is optimized away because the size is known. . let dst = (self.buf.as_mut_ptr() as *mut u8).add(nbuf); . ptr::copy_nonoverlapping(&x as *const _ as *const u8, dst, size); . } . 910,071 ( 0.03%) self.nbuf = nbuf + size; . . return; . } . 151,373 ( 0.00%) unsafe { self.short_write_process_buffer(x) } . } . . // A specialized write function for values with size <= 8 that should only . // be called when the write would cause the buffer to fill. . // . // SAFETY: the write of `x` into `self.buf` starting at byte offset . // `self.nbuf` must cause `self.buf` to become fully initialized (and not . 
// overflow) if it wasn't already. . #[inline(never)] 45,554 ( 0.00%) unsafe fn short_write_process_buffer(&mut self, x: T) { . let size = mem::size_of::(); 45,554 ( 0.00%) let nbuf = self.nbuf; . debug_assert!(size <= 8); . debug_assert!(nbuf < BUFFER_SIZE); . debug_assert!(nbuf + size >= BUFFER_SIZE); . debug_assert!(nbuf + size < BUFFER_WITH_SPILL_SIZE); . . // Copy first part of input into end of buffer, possibly into spill . // element. The memcpy call is optimized away because the size is known. . let dst = (self.buf.as_mut_ptr() as *mut u8).add(nbuf); . ptr::copy_nonoverlapping(&x as *const _ as *const u8, dst, size); . . // Process buffer. . for i in 0..BUFFER_CAPACITY { 455,540 ( 0.01%) let elem = self.buf.get_unchecked(i).assume_init().to_le(); 364,432 ( 0.01%) self.state.v3 ^= elem; . Sip24Rounds::c_rounds(&mut self.state); 409,986 ( 0.01%) self.state.v0 ^= elem; . } . . // Copy remaining input into start of buffer by copying size - 1 . // elements from spill (at most size - 1 bytes could have overflowed . // into the spill). The memcpy call is optimized away because the size . // is known. And the whole copy is optimized away for size == 1. . let src = self.buf.get_unchecked(BUFFER_SPILL_INDEX) as *const _ as *const u8; . ptr::copy_nonoverlapping(src, self.buf.as_mut_ptr() as *mut u8, size - 1); . . // This function should only be called when the write fills the buffer. . // Therefore, when size == 1, the new `self.nbuf` must be zero. The size . // is statically known, so the branch is optimized away. 214,974 ( 0.01%) self.nbuf = if size == 1 { 0 } else { nbuf + size - BUFFER_SIZE }; 182,216 ( 0.01%) self.processed += BUFFER_SIZE; 91,108 ( 0.00%) } . . // A write function for byte slices. . #[inline] . fn slice_write(&mut self, msg: &[u8]) { . let length = msg.len(); 9,972 ( 0.00%) let nbuf = self.nbuf; . debug_assert!(nbuf < BUFFER_SIZE); . 126,635 ( 0.00%) if nbuf + length < BUFFER_SIZE { . unsafe { . let dst = (self.buf.as_mut_ptr() as *mut u8).add(nbuf); . 46,340 ( 0.00%) if length <= 8 { . copy_nonoverlapping_small(msg.as_ptr(), dst, length); . } else { . // This memcpy is *not* optimized away. . ptr::copy_nonoverlapping(msg.as_ptr(), dst, length); . } . } . 23,243 ( 0.00%) self.nbuf = nbuf + length; . . return; . } . 14,518 ( 0.00%) unsafe { self.slice_write_process_buffer(msg) } . } . . // A write function for byte slices that should only be called when the . // write would cause the buffer to fill. . // . // SAFETY: `self.buf` must be initialized up to the byte offset `self.nbuf`, . // and `msg` must contain enough bytes to initialize the rest of the element . // containing the byte offset `self.nbuf`. . #[inline(never)] 9,590 ( 0.00%) unsafe fn slice_write_process_buffer(&mut self, msg: &[u8]) { . let length = msg.len(); 1,918 ( 0.00%) let nbuf = self.nbuf; . debug_assert!(nbuf < BUFFER_SIZE); . debug_assert!(nbuf + length >= BUFFER_SIZE); . . // Always copy first part of input into current element of buffer. . // This function should only be called when the write fills the buffer, . // so we know that there is enough input to fill the current element. 5,754 ( 0.00%) let valid_in_elem = nbuf % ELEM_SIZE; 1,918 ( 0.00%) let needed_in_elem = ELEM_SIZE - valid_in_elem; . . let src = msg.as_ptr(); . let dst = (self.buf.as_mut_ptr() as *mut u8).add(nbuf); . copy_nonoverlapping_small(src, dst, needed_in_elem); . . // Process buffer. . . // Using `nbuf / ELEM_SIZE + 1` rather than `(nbuf + needed_in_elem) / . 
// ELEM_SIZE` to show the compiler that this loop's upper bound is > 0. . // We know that is true, because last step ensured we have a full . // element in the buffer. 3,836 ( 0.00%) let last = nbuf / ELEM_SIZE + 1; . . for i in 0..last { 14,496 ( 0.00%) let elem = self.buf.get_unchecked(i).assume_init().to_le(); 16,414 ( 0.00%) self.state.v3 ^= elem; . Sip24Rounds::c_rounds(&mut self.state); 28,992 ( 0.00%) self.state.v0 ^= elem; . } . . // Process the remaining element-sized chunks of input. . let mut processed = needed_in_elem; 3,836 ( 0.00%) let input_left = length - processed; 2,553 ( 0.00%) let elems_left = input_left / ELEM_SIZE; . let extra_bytes_left = input_left % ELEM_SIZE; . . for _ in 0..elems_left { 1,425 ( 0.00%) let elem = (msg.as_ptr().add(processed) as *const u64).read_unaligned().to_le(); 1,425 ( 0.00%) self.state.v3 ^= elem; . Sip24Rounds::c_rounds(&mut self.state); 1,425 ( 0.00%) self.state.v0 ^= elem; 2,850 ( 0.00%) processed += ELEM_SIZE; . } . . // Copy remaining input into start of buffer. . let src = msg.as_ptr().add(processed); . let dst = self.buf.as_mut_ptr() as *mut u8; . copy_nonoverlapping_small(src, dst, extra_bytes_left); . 1,918 ( 0.00%) self.nbuf = extra_bytes_left; 9,590 ( 0.00%) self.processed += nbuf + processed; 11,508 ( 0.00%) } . . #[inline] . pub fn finish128(mut self) -> (u64, u64) { . debug_assert!(self.nbuf < BUFFER_SIZE); . . // Process full elements in buffer. 28,164 ( 0.00%) let last = self.nbuf / ELEM_SIZE; . . // Since we're consuming self, avoid updating members for a potential . // performance gain. 37,552 ( 0.00%) let mut state = self.state; . . for i in 0..last { 27,004 ( 0.00%) let elem = unsafe { self.buf.get_unchecked(i).assume_init().to_le() }; 27,004 ( 0.00%) state.v3 ^= elem; . Sip24Rounds::c_rounds(&mut state); 27,004 ( 0.00%) state.v0 ^= elem; . } . . // Get remaining partial element. 18,776 ( 0.00%) let elem = if self.nbuf % ELEM_SIZE != 0 { . unsafe { . // Ensure element is initialized by writing zero bytes. At most . // `ELEM_SIZE - 1` are required given the above check. It's safe . // to write this many because we have the spill and we maintain . // `self.nbuf` such that this write will start before the spill. . let dst = (self.buf.as_mut_ptr() as *mut u8).add(self.nbuf); . ptr::write_bytes(dst, 0, ELEM_SIZE - 1); 8,194 ( 0.00%) self.buf.get_unchecked(last).assume_init().to_le() . } . } else { . 0 . }; . . // Finalize the hash. 26,371 ( 0.00%) let length = self.processed + self.nbuf; 17,574 ( 0.00%) let b: u64 = ((length as u64 & 0xff) << 56) | elem; . 8,787 ( 0.00%) state.v3 ^= b; . Sip24Rounds::c_rounds(&mut state); 8,787 ( 0.00%) state.v0 ^= b; . 8,787 ( 0.00%) state.v2 ^= 0xee; . Sip24Rounds::d_rounds(&mut state); 29,440 ( 0.00%) let _0 = state.v0 ^ state.v1 ^ state.v2 ^ state.v3; . 11,862 ( 0.00%) state.v1 ^= 0xdd; . Sip24Rounds::d_rounds(&mut state); 11,862 ( 0.00%) let _1 = state.v0 ^ state.v1 ^ state.v2 ^ state.v3; . . (_0, _1) . } . } . . impl Hasher for SipHasher128 { . #[inline] . fn write_u8(&mut self, i: u8) { -- line 414 ---------------------------------------- -- line 471 ---------------------------------------- . } . . #[derive(Debug, Clone, Default)] . struct Sip24Rounds; . . impl Sip24Rounds { . #[inline] . fn c_rounds(state: &mut State) { 1,715,884 ( 0.05%) compress!(state); 1,844,726 ( 0.06%) compress!(state); . } . . #[inline] . fn d_rounds(state: &mut State) { 58,872 ( 0.00%) compress!(state); 58,872 ( 0.00%) compress!(state); 58,872 ( 0.00%) compress!(state); 50,087 ( 0.00%) compress!(state); . } . 
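--------------------------------------------------------------------------------
Note: `Sip24Rounds` above is where the "2-4" in SipHash-2-4 lives: every
8-byte element that the buffering code accumulates is mixed in with two
compress rounds, and finalization runs four. The `compress!` macro body is
below the annotation threshold and not shown; it expands to the standard
SipHash round, sketched here on its own (the reference round function, not
the exact rustc macro):

struct State { v0: u64, v1: u64, v2: u64, v3: u64 }

// One SipHash round, as performed by each `compress!(state)` above.
fn sip_round(s: &mut State) {
    s.v0 = s.v0.wrapping_add(s.v1); s.v1 = s.v1.rotate_left(13);
    s.v1 ^= s.v0;                   s.v0 = s.v0.rotate_left(32);
    s.v2 = s.v2.wrapping_add(s.v3); s.v3 = s.v3.rotate_left(16);
    s.v3 ^= s.v2;
    s.v0 = s.v0.wrapping_add(s.v3); s.v3 = s.v3.rotate_left(21);
    s.v3 ^= s.v0;
    s.v2 = s.v2.wrapping_add(s.v1); s.v1 = s.v1.rotate_left(17);
    s.v1 ^= s.v2;                   s.v2 = s.v2.rotate_left(32);
}

fn c_rounds(s: &mut State) { sip_round(s); sip_round(s); }     // per element
fn d_rounds(s: &mut State) { for _ in 0..4 { sip_round(s); } } // finalization

fn main() {
    let mut s = State { v0: 1, v1: 2, v2: 3, v3: 4 };
    c_rounds(&mut s);
    d_rounds(&mut s);
    println!("{:016x}", s.v0 ^ s.v1 ^ s.v2 ^ s.v3);
}
--------------------------------------------------------------------------------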
} 344,006 ( 0.01%) -------------------------------------------------------------------------------- -- Auto-annotated source: /usr/home/liquid/rust/worktree-benchmarking/library/std/src/sys/unix/alloc.rs -------------------------------------------------------------------------------- Ir -- line 5 ---------------------------------------- . #[stable(feature = "alloc_system_type", since = "1.28.0")] . unsafe impl GlobalAlloc for System { . #[inline] . unsafe fn alloc(&self, layout: Layout) -> *mut u8 { . // jemalloc provides alignment less than MIN_ALIGN for small allocations. . // So only rely on MIN_ALIGN if size >= align. . // Also see and . // . 1,936,350 ( 0.06%) if layout.align() <= MIN_ALIGN && layout.align() <= layout.size() { 1,452,261 ( 0.04%) libc::malloc(layout.size()) as *mut u8 . } else { . #[cfg(target_os = "macos")] . { . if layout.align() > (1 << 31) { . return ptr::null_mut(); . } . } . aligned_malloc(&layout) . } . } . . #[inline] . unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 { . // See the comment above in `alloc` for why this check looks the way it does. 185,732 ( 0.01%) if layout.align() <= MIN_ALIGN && layout.align() <= layout.size() { 278,598 ( 0.01%) libc::calloc(layout.size(), 1) as *mut u8 . } else { . let ptr = self.alloc(layout); . if !ptr.is_null() { . ptr::write_bytes(ptr, 0, layout.size()); . } . ptr . } . } . . #[inline] . unsafe fn dealloc(&self, ptr: *mut u8, _layout: Layout) { 530,510 ( 0.02%) libc::free(ptr as *mut libc::c_void) . } . . #[inline] . unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 { 217,516 ( 0.01%) if layout.align() <= MIN_ALIGN && layout.align() <= new_size { 435,032 ( 0.01%) libc::realloc(ptr as *mut libc::c_void, new_size) as *mut u8 . } else { . realloc_fallback(self, ptr, layout, new_size) . } . } . } . . cfg_if::cfg_if! { . if #[cfg(any( -- line 56 ---------------------------------------- -- line 84 ---------------------------------------- . } else if #[cfg(target_os = "wasi")] { . #[inline] . unsafe fn aligned_malloc(layout: &Layout) -> *mut u8 { . libc::aligned_alloc(layout.align(), layout.size()) as *mut u8 . } . } else { . #[inline] . unsafe fn aligned_malloc(layout: &Layout) -> *mut u8 { 1 ( 0.00%) let mut out = ptr::null_mut(); . // posix_memalign requires that the alignment be a multiple of `sizeof(void*)`. . // Since these are all powers of 2, we can just use max. . let align = layout.align().max(crate::mem::size_of::()); 2 ( 0.00%) let ret = libc::posix_memalign(&mut out, align, layout.size()); 2 ( 0.00%) if ret != 0 { ptr::null_mut() } else { out as *mut u8 } . } . } . } 1 ( 0.00%) -------------------------------------------------------------------------------- -- Auto-annotated source: /usr/home/liquid/rust/worktree-benchmarking/compiler/rustc_span/src/caching_source_map_view.rs -------------------------------------------------------------------------------- Ir -- line 27 ---------------------------------------- . impl CacheEntry { . #[inline] . fn update( . &mut self, . new_file_and_idx: Option<(Lrc, usize)>, . pos: BytePos, . time_stamp: usize, . ) { 12,502 ( 0.00%) if let Some((file, file_idx)) = new_file_and_idx { 4,919 ( 0.00%) self.file = file; 7,234 ( 0.00%) self.file_index = file_idx; . } . 5,057 ( 0.00%) let line_index = self.file.lookup_line(pos).unwrap(); . let line_bounds = self.file.line_bounds(line_index); 18,216 ( 0.00%) self.line_number = line_index + 1; 26,061 ( 0.00%) self.line = line_bounds; . self.touch(time_stamp); . } . . #[inline] . 
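--------------------------------------------------------------------------------
Note: the `System` allocator code above takes the cheap `malloc`/`calloc`/
`realloc` path only when the requested alignment is already guaranteed: it
must not exceed `MIN_ALIGN`, and, because allocators such as jemalloc may
align very small allocations only to their size class, it must not exceed the
size either. Everything else goes through `posix_memalign`. A sketch of just
that predicate, with the `MIN_ALIGN` value assumed (16 bytes on typical
x86_64 glibc):

const MIN_ALIGN: usize = 16; // assumed platform guarantee

// True when plain malloc already satisfies the layout's alignment.
fn plain_malloc_ok(size: usize, align: usize) -> bool {
    align <= MIN_ALIGN && align <= size
}

fn main() {
    assert!(plain_malloc_ok(64, 8));  // ordinary allocation
    assert!(!plain_malloc_ok(8, 64)); // over-aligned: posix_memalign
    assert!(!plain_malloc_ok(1, 16)); // tiny but align > size: slow path
    println!("ok");
}
--------------------------------------------------------------------------------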
fn touch(&mut self, time_stamp: usize) { 84,836 ( 0.00%) self.time_stamp = time_stamp; . } . } . . #[derive(Clone)] . pub struct CachingSourceMapView<'sm> { . source_map: &'sm SourceMap, . line_cache: [CacheEntry; 3], . time_stamp: usize, . } . . impl<'sm> CachingSourceMapView<'sm> { 3,700 ( 0.00%) pub fn new(source_map: &'sm SourceMap) -> CachingSourceMapView<'sm> { . let files = source_map.files(); 1,850 ( 0.00%) let first_file = files[0].clone(); . let entry = CacheEntry { . time_stamp: 0, . line_number: 0, . line: BytePos(0)..BytePos(0), . file: first_file, . file_index: 0, . }; . 9,250 ( 0.00%) CachingSourceMapView { . source_map, 12,950 ( 0.00%) line_cache: [entry.clone(), entry.clone(), entry], . time_stamp: 0, . } 7,400 ( 0.00%) } . . pub fn byte_pos_to_line_and_col( . &mut self, . pos: BytePos, . ) -> Option<(Lrc, usize, BytePos)> { . self.time_stamp += 1; . . // Check if the position is in one of the cached lines -- line 85 ---------------------------------------- -- line 106 ---------------------------------------- . }; . . let cache_entry = &mut self.line_cache[oldest]; . cache_entry.update(new_file_and_idx, pos, self.time_stamp); . . Some((cache_entry.file.clone(), cache_entry.line_number, pos - cache_entry.line.start)) . } . 325,766 ( 0.01%) pub fn span_data_to_lines_and_cols( . &mut self, . span_data: &SpanData, . ) -> Option<(Lrc, usize, BytePos, usize, BytePos)> { 186,152 ( 0.01%) self.time_stamp += 1; . . // Check if lo and hi are in the cached lines. 46,538 ( 0.00%) let lo_cache_idx = self.cache_entry_index(span_data.lo); 46,538 ( 0.00%) let hi_cache_idx = self.cache_entry_index(span_data.hi); . 76,920 ( 0.00%) if lo_cache_idx != -1 && hi_cache_idx != -1 { . // Cache hit for span lo and hi. Check if they belong to the same file. . let result = { 76,596 ( 0.00%) let lo = &self.line_cache[lo_cache_idx as usize]; . let hi = &self.line_cache[hi_cache_idx as usize]; . 229,788 ( 0.01%) if lo.file_index != hi.file_index { . return None; . } . . ( 76,596 ( 0.00%) lo.file.clone(), . lo.line_number, . span_data.lo - lo.line.start, 38,298 ( 0.00%) hi.line_number, . span_data.hi - hi.line.start, . ) . }; . 38,298 ( 0.00%) self.line_cache[lo_cache_idx as usize].touch(self.time_stamp); 38,298 ( 0.00%) self.line_cache[hi_cache_idx as usize].touch(self.time_stamp); . 153,192 ( 0.00%) return Some(result); . } . . // No cache hit or cache hit for only one of span lo and hi. 16,156 ( 0.00%) let oldest = if lo_cache_idx != -1 || hi_cache_idx != -1 { . let avoid_idx = if lo_cache_idx != -1 { lo_cache_idx } else { hi_cache_idx }; . self.oldest_cache_entry_index_avoid(avoid_idx as usize) . } else { . self.oldest_cache_entry_index() . }; . . // If the entry doesn't point to the correct file, get the new file and index. . // Return early if the file containing beginning of span doesn't contain end of span. 62,968 ( 0.00%) let new_file_and_idx = if !file_contains(&self.line_cache[oldest].file, span_data.lo) { 15,915 ( 0.00%) let new_file_and_idx = self.file_for_position(span_data.lo)?; 19,098 ( 0.00%) if !file_contains(&new_file_and_idx.0, span_data.hi) { . return None; . } . 12,732 ( 0.00%) Some(new_file_and_idx) . } else { . let file = &self.line_cache[oldest].file; 15,171 ( 0.00%) if !file_contains(&file, span_data.hi) { . return None; . } . 15,171 ( 0.00%) None . }; . . // Update the cache entries. 41,200 ( 0.00%) let (lo_idx, hi_idx) = match (lo_cache_idx, hi_cache_idx) { . // Oldest cache entry is for span_data.lo line. . (-1, -1) => { . let lo = &mut self.line_cache[oldest]; . 
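--------------------------------------------------------------------------------
Note: `CachingSourceMapView` above is a three-entry cache with timestamp-based
eviction: every hit calls `touch` to refresh the entry's stamp, and on a miss
the entry with the smallest stamp is overwritten (see
`oldest_cache_entry_index` further down, including the variant that avoids
the slot the other end of a span already claimed). A minimal generic sketch
of the same policy (hypothetical names):

// Tiny timestamp-LRU with the same eviction rule as `line_cache`.
struct LruSlots<K: PartialEq, V> {
    slots: Vec<(K, V, usize)>, // (key, value, time_stamp)
    clock: usize,
}

impl<K: PartialEq, V> LruSlots<K, V> {
    fn new() -> Self {
        LruSlots { slots: Vec::with_capacity(3), clock: 0 }
    }

    fn get_or_insert_with(&mut self, key: K, make: impl FnOnce() -> V) -> &V {
        self.clock += 1;
        if let Some(i) = self.slots.iter().position(|(k, _, _)| *k == key) {
            self.slots[i].2 = self.clock; // hit: refresh stamp, cf. `touch`
            return &self.slots[i].1;
        }
        let entry = (key, make(), self.clock);
        let idx = if self.slots.len() < 3 {
            self.slots.push(entry);
            self.slots.len() - 1
        } else {
            // Miss with a full cache: evict the oldest entry.
            let oldest = (0..3).min_by_key(|&i| self.slots[i].2).unwrap();
            self.slots[oldest] = entry;
            oldest
        };
        &self.slots[idx].1
    }
}

fn main() {
    let mut cache: LruSlots<u32, String> = LruSlots::new();
    for line in [1, 2, 3, 1, 4] { // inserting 4 evicts 2, the oldest
        cache.get_or_insert_with(line, || format!("line {line} data"));
    }
    assert!(cache.slots.iter().any(|(k, _, _)| *k == 1));
    assert!(!cache.slots.iter().any(|(k, _, _)| *k == 2));
    println!("ok");
}
--------------------------------------------------------------------------------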
lo.update(new_file_and_idx, span_data.lo, self.time_stamp); . 17,841 ( 0.00%) if !lo.line.contains(&span_data.hi) { . let new_file_and_idx = Some((lo.file.clone(), lo.file_index)); . let next_oldest = self.oldest_cache_entry_index_avoid(oldest); . let hi = &mut self.line_cache[next_oldest]; . hi.update(new_file_and_idx, span_data.hi, self.time_stamp); . (oldest, next_oldest) . } else { . (oldest, oldest) . } . } . // Oldest cache entry is for span_data.lo line. . (-1, _) => { . let lo = &mut self.line_cache[oldest]; . lo.update(new_file_and_idx, span_data.lo, self.time_stamp); 324 ( 0.00%) let hi = &mut self.line_cache[hi_cache_idx as usize]; 486 ( 0.00%) hi.touch(self.time_stamp); . (oldest, hi_cache_idx as usize) . } . // Oldest cache entry is for span_data.hi line. . (_, -1) => { . let hi = &mut self.line_cache[oldest]; 2,131 ( 0.00%) hi.update(new_file_and_idx, span_data.hi, self.time_stamp); 4,262 ( 0.00%) let lo = &mut self.line_cache[lo_cache_idx as usize]; 8,524 ( 0.00%) lo.touch(self.time_stamp); . (lo_cache_idx as usize, oldest) . } . _ => { . panic!(); . } . }; . . let lo = &self.line_cache[lo_idx]; . let hi = &self.line_cache[hi_idx]; . . // Span lo and hi may equal line end when last line doesn't . // end in newline, hence the inclusive upper bounds below. 16,480 ( 0.00%) assert!(span_data.lo >= lo.line.start); 8,240 ( 0.00%) assert!(span_data.lo <= lo.line.end); 16,480 ( 0.00%) assert!(span_data.hi >= hi.line.start); 8,240 ( 0.00%) assert!(span_data.hi <= hi.line.end); 41,200 ( 0.00%) assert!(lo.file.contains(span_data.lo)); 24,720 ( 0.00%) assert!(lo.file.contains(span_data.hi)); 24,720 ( 0.00%) assert_eq!(lo.file_index, hi.file_index); . 24,720 ( 0.00%) Some(( 8,240 ( 0.00%) lo.file.clone(), . lo.line_number, . span_data.lo - lo.line.start, 8,240 ( 0.00%) hi.line_number, . span_data.hi - hi.line.start, . )) 418,842 ( 0.01%) } . . fn cache_entry_index(&self, pos: BytePos) -> isize { . for (idx, cache_entry) in self.line_cache.iter().enumerate() { 445,152 ( 0.01%) if cache_entry.line.contains(&pos) { . return idx as isize; . } . } . . -1 . } . . fn oldest_cache_entry_index(&self) -> usize { . let mut oldest = 0; . . for idx in 1..self.line_cache.len() { 35,682 ( 0.00%) if self.line_cache[idx].time_stamp < self.line_cache[oldest].time_stamp { . oldest = idx; . } . } . . oldest . } . . fn oldest_cache_entry_index_avoid(&self, avoid_idx: usize) -> usize { . let mut oldest = if avoid_idx != 0 { 0 } else { 1 }; . . for idx in 0..self.line_cache.len() { 18,706 ( 0.00%) if idx != avoid_idx 8,784 ( 0.00%) && self.line_cache[idx].time_stamp < self.line_cache[oldest].time_stamp . { . oldest = idx; . } . } . . oldest . } . 15,915 ( 0.00%) fn file_for_position(&self, pos: BytePos) -> Option<(Lrc, usize)> { 3,183 ( 0.00%) if !self.source_map.files().is_empty() { 6,366 ( 0.00%) let file_idx = self.source_map.lookup_source_file_idx(pos); . let file = &self.source_map.files()[file_idx]; . 25,464 ( 0.00%) if file_contains(file, pos) { . return Some((file.clone(), file_idx)); . } . } . . None 15,915 ( 0.00%) } . } . . #[inline] . fn file_contains(file: &SourceFile, pos: BytePos) -> bool { . // `SourceMap::lookup_source_file_idx` and `SourceFile::contains` both consider the position . // one past the end of a file to belong to it. Normally, that's what we want. But for the . // purposes of converting a byte position to a line and column number, we can't come up with a . 
// line and column number if the file is empty, because an empty file doesn't contain any -- line 290 ---------------------------------------- 656,351 ( 0.02%) -------------------------------------------------------------------------------- The following files chosen for auto-annotation could not be found: -------------------------------------------------------------------------------- ./elf/../sysdeps/x86_64/dl-machine.h ./elf/dl-lookup.c ./malloc/malloc.c ./stdlib/msort.c ./string/../sysdeps/x86_64/multiarch/memchr-avx2.S ./string/../sysdeps/x86_64/multiarch/memcmp-avx2-movbe.S ./string/../sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S ./string/../sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S ./string/../sysdeps/x86_64/multiarch/strcmp-avx2.S ./string/../sysdeps/x86_64/multiarch/strlen-avx2.S /tmp/gcc-build/x86_64-unknown-linux-gnu/libstdc++-v3/libsupc++/../../../../gcc-5.5.0/libstdc++-v3/libsupc++/new_op.cc -------------------------------------------------------------------------------- Ir -------------------------------------------------------------------------------- 118,984,592 ( 3.67%) events annotated