File: src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Support/Alignment.h
Warning: line 85, column 47: The result of the left shift is undefined due to shifting by '255', which is greater or equal to the width of type 'uint64_t'.
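The flagged expression is the power-of-two expansion in Align::value(), which
left-shifts by a stored log2 exponent. A minimal sketch of the pattern (a
hedged reconstruction; the field name ShiftValue follows LLVM's Alignment.h,
and 255 is what an uninitialized or invalid encoded alignment looks like):

    #include <cstdint>

    struct AlignSketch {
      uint8_t ShiftValue = 0; // log2(alignment); valid values are 0..63
      uint64_t value() const {
        // Undefined behavior whenever ShiftValue >= 64 (such as the 255 the
        // analyzer reports): the shift count must be less than the width of
        // uint64_t.
        return uint64_t(1) << ShiftValue;
      }
    };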
//===- lib/MC/MCAssembler.cpp - Assembler Backend Implementation ----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "llvm/MC/MCAssembler.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCAsmLayout.h"
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCCodeView.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCDwarf.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCFixup.h"
#include "llvm/MC/MCFixupKindInfo.h"
#include "llvm/MC/MCFragment.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCObjectWriter.h"
#include "llvm/MC/MCSection.h"
#include "llvm/MC/MCSectionELF.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/MC/MCValue.h"
#include "llvm/Support/Alignment.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/EndianStream.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/LEB128.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <cstdint>
#include <cstring>
#include <tuple>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "assembler"

namespace {
namespace stats {

STATISTIC(EmittedFragments, "Number of emitted assembler fragments - total");
STATISTIC(EmittedRelaxableFragments,
          "Number of emitted assembler fragments - relaxable");
STATISTIC(EmittedDataFragments,
          "Number of emitted assembler fragments - data");
STATISTIC(EmittedCompactEncodedInstFragments,
          "Number of emitted assembler fragments - compact encoded inst");
STATISTIC(EmittedAlignFragments,
          "Number of emitted assembler fragments - align");
STATISTIC(EmittedFillFragments,
          "Number of emitted assembler fragments - fill");
STATISTIC(EmittedNopsFragments, "Number of emitted assembler fragments - nops");
STATISTIC(EmittedOrgFragments, "Number of emitted assembler fragments - org");
STATISTIC(evaluateFixup, "Number of evaluated fixups");
STATISTIC(FragmentLayouts, "Number of fragment layouts");
STATISTIC(ObjectBytes, "Number of emitted object file bytes");
STATISTIC(RelaxationSteps, "Number of assembler layout and relaxation steps");
STATISTIC(RelaxedInstructions, "Number of relaxed instructions");

} // end namespace stats
} // end anonymous namespace

// FIXME FIXME FIXME: There are a number of places in this file where we
// convert what is a 64-bit assembler value used for computation into a value
// in the object file, which may truncate it. We should detect that truncation
// where invalid and report errors back.
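// Illustrative sketch only (not part of the original file): the kind of check
// the FIXME above asks for. Narrowing a 64-bit assembler value into a 32-bit
// object-file field silently drops the high bits, so an overflow has to be
// detected before the truncating cast. The helper name is hypothetical.
namespace {
inline bool exampleFitsIn32BitField(uint64_t Value) {
  return Value <= UINT32_MAX; // anything larger would be silently truncated
}
} // end anonymous namespace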
/* *** */

MCAssembler::MCAssembler(MCContext &Context,
                         std::unique_ptr<MCAsmBackend> Backend,
                         std::unique_ptr<MCCodeEmitter> Emitter,
                         std::unique_ptr<MCObjectWriter> Writer)
    : Context(Context), Backend(std::move(Backend)),
      Emitter(std::move(Emitter)), Writer(std::move(Writer)),
      BundleAlignSize(0), RelaxAll(false), SubsectionsViaSymbols(false),
      IncrementalLinkerCompatible(false), ELFHeaderEFlags(0) {
  VersionInfo.Major = 0; // Major version == 0 for "none specified"
}

MCAssembler::~MCAssembler() = default;

void MCAssembler::reset() {
  Sections.clear();
  Symbols.clear();
  IndirectSymbols.clear();
  DataRegions.clear();
  LinkerOptions.clear();
  FileNames.clear();
  ThumbFuncs.clear();
  BundleAlignSize = 0;
  RelaxAll = false;
  SubsectionsViaSymbols = false;
  IncrementalLinkerCompatible = false;
  ELFHeaderEFlags = 0;
  LOHContainer.reset();
  VersionInfo.Major = 0;
  VersionInfo.SDKVersion = VersionTuple();

  // reset objects owned by us
  if (getBackendPtr())
    getBackendPtr()->reset();
  if (getEmitterPtr())
    getEmitterPtr()->reset();
  if (getWriterPtr())
    getWriterPtr()->reset();
  getLOHContainer().reset();
}

bool MCAssembler::registerSection(MCSection &Section) {
  if (Section.isRegistered())
    return false;
  Sections.push_back(&Section);
  Section.setIsRegistered(true);
  return true;
}

bool MCAssembler::isThumbFunc(const MCSymbol *Symbol) const {
  if (ThumbFuncs.count(Symbol))
    return true;

  if (!Symbol->isVariable())
    return false;

  const MCExpr *Expr = Symbol->getVariableValue();

  MCValue V;
  if (!Expr->evaluateAsRelocatable(V, nullptr, nullptr))
    return false;

  if (V.getSymB() || V.getRefKind() != MCSymbolRefExpr::VK_None)
    return false;

  const MCSymbolRefExpr *Ref = V.getSymA();
  if (!Ref)
    return false;

  if (Ref->getKind() != MCSymbolRefExpr::VK_None)
    return false;

  const MCSymbol &Sym = Ref->getSymbol();
  if (!isThumbFunc(&Sym))
    return false;

  ThumbFuncs.insert(Symbol); // Cache it.
  return true;
}

bool MCAssembler::isSymbolLinkerVisible(const MCSymbol &Symbol) const {
  // Non-temporary labels should always be visible to the linker.
  if (!Symbol.isTemporary())
    return true;

  if (Symbol.isUsedInReloc())
    return true;

  return false;
}

const MCSymbol *MCAssembler::getAtom(const MCSymbol &S) const {
  // Linker visible symbols define atoms.
  if (isSymbolLinkerVisible(S))
    return &S;

  // Absolute and undefined symbols have no defining atom.
  if (!S.isInSection())
    return nullptr;

  // Non-linker visible symbols in sections which can't be atomized have no
  // defining atom.
  if (!getContext().getAsmInfo()->isSectionAtomizableBySymbols(
          *S.getFragment()->getParent()))
    return nullptr;

  // Otherwise, return the atom for the containing fragment.
  return S.getFragment()->getAtom();
}

bool MCAssembler::evaluateFixup(const MCAsmLayout &Layout,
                                const MCFixup &Fixup, const MCFragment *DF,
                                MCValue &Target, uint64_t &Value,
                                bool &WasForced) const {
  ++stats::evaluateFixup;

  // FIXME: This code has some duplication with recordRelocation. We should
  // probably merge the two into a single callback that tries to evaluate a
  // fixup and records a relocation if one is needed.

  // On error claim to have completely evaluated the fixup, to prevent any
  // further processing from being done.
  const MCExpr *Expr = Fixup.getValue();
  MCContext &Ctx = getContext();
  Value = 0;
  WasForced = false;
  if (!Expr->evaluateAsRelocatable(Target, &Layout, &Fixup)) {
    Ctx.reportError(Fixup.getLoc(), "expected relocatable expression");
    return true;
  }
  if (const MCSymbolRefExpr *RefB = Target.getSymB()) {
    if (RefB->getKind() != MCSymbolRefExpr::VK_None) {
      Ctx.reportError(Fixup.getLoc(),
                      "unsupported subtraction of qualified symbol");
      return true;
    }
  }

  assert(getBackendPtr() && "Expected assembler backend");
  bool IsTarget = getBackendPtr()->getFixupKindInfo(Fixup.getKind()).Flags &
                  MCFixupKindInfo::FKF_IsTarget;

  if (IsTarget)
    return getBackend().evaluateTargetFixup(*this, Layout, Fixup, DF, Target,
                                            Value, WasForced);

  unsigned FixupFlags = getBackendPtr()->getFixupKindInfo(Fixup.getKind()).Flags;
  bool IsPCRel = getBackendPtr()->getFixupKindInfo(Fixup.getKind()).Flags &
                 MCFixupKindInfo::FKF_IsPCRel;

  bool IsResolved = false;
  if (IsPCRel) {
    if (Target.getSymB()) {
      IsResolved = false;
    } else if (!Target.getSymA()) {
      IsResolved = false;
    } else {
      const MCSymbolRefExpr *A = Target.getSymA();
      const MCSymbol &SA = A->getSymbol();
      if (A->getKind() != MCSymbolRefExpr::VK_None || SA.isUndefined()) {
        IsResolved = false;
      } else if (auto *Writer = getWriterPtr()) {
        IsResolved = (FixupFlags & MCFixupKindInfo::FKF_Constant) ||
                     Writer->isSymbolRefDifferenceFullyResolvedImpl(
                         *this, SA, *DF, false, true);
      }
    }
  } else {
    IsResolved = Target.isAbsolute();
  }

  Value = Target.getConstant();

  if (const MCSymbolRefExpr *A = Target.getSymA()) {
    const MCSymbol &Sym = A->getSymbol();
    if (Sym.isDefined())
      Value += Layout.getSymbolOffset(Sym);
  }
  if (const MCSymbolRefExpr *B = Target.getSymB()) {
    const MCSymbol &Sym = B->getSymbol();
    if (Sym.isDefined())
      Value -= Layout.getSymbolOffset(Sym);
  }

  bool ShouldAlignPC = getBackend().getFixupKindInfo(Fixup.getKind()).Flags &
                       MCFixupKindInfo::FKF_IsAlignedDownTo32Bits;
  assert((ShouldAlignPC ? IsPCRel : true) &&
         "FKF_IsAlignedDownTo32Bits is only allowed on PC-relative fixups!");

  if (IsPCRel) {
    uint32_t Offset = Layout.getFragmentOffset(DF) + Fixup.getOffset();

    // A number of ARM fixups in Thumb mode require that the effective PC
    // address be determined as the 32-bit aligned version of the actual
    // offset.
    if (ShouldAlignPC) Offset &= ~0x3;
    Value -= Offset;
  }

  // Let the backend force a relocation if needed.
  if (IsResolved && getBackend().shouldForceRelocation(*this, Fixup, Target)) {
    IsResolved = false;
    WasForced = true;
  }

  return IsResolved;
}
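// Illustrative sketch (not part of the original file) of the PC-relative
// adjustment performed above: the stored value is target minus fixup address,
// and for Thumb fixups carrying FKF_IsAlignedDownTo32Bits the effective PC is
// first rounded down to a 4-byte boundary.
namespace {
uint64_t examplePCRelValue(uint64_t TargetAddr, uint64_t FixupAddr,
                           bool AlignPCTo32Bits) {
  if (AlignPCTo32Bits)
    FixupAddr &= ~UINT64_C(0x3); // e.g. 0x106 becomes 0x104
  return TargetAddr - FixupAddr; // value to be patched into the instruction
}
} // end anonymous namespace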

uint64_t MCAssembler::computeFragmentSize(const MCAsmLayout &Layout,
                                          const MCFragment &F) const {
  assert(getBackendPtr() && "Requires assembler backend");
  switch (F.getKind()) {
  case MCFragment::FT_Data:
    return cast<MCDataFragment>(F).getContents().size();
  case MCFragment::FT_Relaxable:
    return cast<MCRelaxableFragment>(F).getContents().size();
  case MCFragment::FT_CompactEncodedInst:
    return cast<MCCompactEncodedInstFragment>(F).getContents().size();
  case MCFragment::FT_Fill: {
    auto &FF = cast<MCFillFragment>(F);
    int64_t NumValues = 0;
    if (!FF.getNumValues().evaluateAsAbsolute(NumValues, Layout)) {
      getContext().reportError(FF.getLoc(),
                               "expected assembly-time absolute expression");
      return 0;
    }
    int64_t Size = NumValues * FF.getValueSize();
    if (Size < 0) {
      getContext().reportError(FF.getLoc(), "invalid number of bytes");
      return 0;
    }
    return Size;
  }

  case MCFragment::FT_Nops:
    return cast<MCNopsFragment>(F).getNumBytes();

  case MCFragment::FT_LEB:
    return cast<MCLEBFragment>(F).getContents().size();

  case MCFragment::FT_BoundaryAlign:
    return cast<MCBoundaryAlignFragment>(F).getSize();

  case MCFragment::FT_SymbolId:
    return 4;

  case MCFragment::FT_Align: {
    const MCAlignFragment &AF = cast<MCAlignFragment>(F);
    unsigned Offset = Layout.getFragmentOffset(&AF);
    unsigned Size = offsetToAlignment(Offset, Align(AF.getAlignment()));

    // Insert extra nops for code alignment if the target defines the
    // shouldInsertExtraNopBytesForCodeAlign target hook.
    if (AF.getParent()->UseCodeAlign() && AF.hasEmitNops() &&
        getBackend().shouldInsertExtraNopBytesForCodeAlign(AF, Size))
      return Size;

    // If we are padding with nops, force the padding to be larger than the
    // minimum nop size.
    if (Size > 0 && AF.hasEmitNops()) {
      while (Size % getBackend().getMinimumNopSize())
        Size += AF.getAlignment();
    }
    if (Size > AF.getMaxBytesToEmit())
      return 0;
    return Size;
  }

  case MCFragment::FT_Org: {
    const MCOrgFragment &OF = cast<MCOrgFragment>(F);
    MCValue Value;
    if (!OF.getOffset().evaluateAsValue(Value, Layout)) {
      getContext().reportError(OF.getLoc(),
                               "expected assembly-time absolute expression");
      return 0;
    }

    uint64_t FragmentOffset = Layout.getFragmentOffset(&OF);
    int64_t TargetLocation = Value.getConstant();
    if (const MCSymbolRefExpr *A = Value.getSymA()) {
      uint64_t Val;
      if (!Layout.getSymbolOffset(A->getSymbol(), Val)) {
        getContext().reportError(OF.getLoc(), "expected absolute expression");
        return 0;
      }
      TargetLocation += Val;
    }
    int64_t Size = TargetLocation - FragmentOffset;
    if (Size < 0 || Size >= 0x40000000) {
      getContext().reportError(
          OF.getLoc(), "invalid .org offset '" + Twine(TargetLocation) +
                           "' (at offset '" + Twine(FragmentOffset) + "')");
      return 0;
    }
    return Size;
  }

  case MCFragment::FT_Dwarf:
    return cast<MCDwarfLineAddrFragment>(F).getContents().size();
  case MCFragment::FT_DwarfFrame:
    return cast<MCDwarfCallFrameFragment>(F).getContents().size();
  case MCFragment::FT_CVInlineLines:
    return cast<MCCVInlineLineTableFragment>(F).getContents().size();
  case MCFragment::FT_CVDefRange:
    return cast<MCCVDefRangeFragment>(F).getContents().size();
  case MCFragment::FT_PseudoProbe:
    return cast<MCPseudoProbeAddrFragment>(F).getContents().size();
  case MCFragment::FT_Dummy:
    llvm_unreachable("Should not have been added");
  }

  llvm_unreachable("invalid fragment kind");
}
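// Illustrative sketch (not part of the original file) of the FT_Align size
// computation above: the padding is the distance from the fragment's offset
// to the next multiple of the alignment, e.g. offset 0x5 with an 8-byte
// alignment needs 3 bytes of padding.
namespace {
uint64_t exampleAlignPadding(uint64_t Offset, uint64_t Alignment) {
  assert(isPowerOf2_64(Alignment) && "alignments are powers of two");
  return -Offset & (Alignment - 1); // same result as offsetToAlignment()
}
} // end anonymous namespace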

void MCAsmLayout::layoutFragment(MCFragment *F) {
  MCFragment *Prev = F->getPrevNode();

  // We should never try to recompute something which is valid.
  assert(!isFragmentValid(F) && "Attempt to recompute a valid fragment!");
  // We should never try to compute the fragment layout if its predecessor
  // isn't valid.
  assert((!Prev || isFragmentValid(Prev)) &&
         "Attempt to compute fragment before its predecessor!");

  assert(!F->IsBeingLaidOut && "Already being laid out!");
  F->IsBeingLaidOut = true;

  ++stats::FragmentLayouts;

  // Compute fragment offset and size.
  if (Prev)
    F->Offset = Prev->Offset + getAssembler().computeFragmentSize(*this, *Prev);
  else
    F->Offset = 0;
  F->IsBeingLaidOut = false;
  LastValidFragment[F->getParent()] = F;

  // If bundling is enabled and this fragment has instructions in it, it has to
  // obey the bundling restrictions. With padding, we'll have:
  //
  //
  //        BundlePadding
  //             |||
  // -------------------------------------
  //   Prev  |##########|       F        |
  // -------------------------------------
  //                     ^
  //                     |
  //                     F->Offset
  //
  // The fragment's offset will point to after the padding, and its computed
  // size won't include the padding.
  //
  // When the -mc-relax-all flag is used, we optimize bundling by writing the
  // padding directly into fragments when the instructions are emitted inside
  // the streamer. When the fragment is larger than the bundle size, we need to
  // ensure that it's bundle aligned. This means that if we end up with
  // multiple fragments, we must emit bundle padding between fragments.
  //
  // ".align N" is an example of a directive that introduces multiple
  // fragments. We could add a special case to handle ".align N" by emitting
  // within-fragment padding (which would produce less padding when N is less
  // than the bundle size), but for now we don't.
  //
  if (Assembler.isBundlingEnabled() && F->hasInstructions()) {
    assert(isa<MCEncodedFragment>(F) &&
           "Only MCEncodedFragment implementations have instructions");
    MCEncodedFragment *EF = cast<MCEncodedFragment>(F);
    uint64_t FSize = Assembler.computeFragmentSize(*this, *EF);

    if (!Assembler.getRelaxAll() && FSize > Assembler.getBundleAlignSize())
      report_fatal_error("Fragment can't be larger than a bundle size");

    uint64_t RequiredBundlePadding =
        computeBundlePadding(Assembler, EF, EF->Offset, FSize);
    if (RequiredBundlePadding > UINT8_MAX)
      report_fatal_error("Padding cannot exceed 255 bytes");
    EF->setBundlePadding(static_cast<uint8_t>(RequiredBundlePadding));
    EF->Offset += RequiredBundlePadding;
  }
}
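// Illustrative sketch (not part of the original file) of the bundling rule
// enforced above, assuming a power-of-two bundle size. This is a simplified
// form of computeBundlePadding(): a fragment that would cross a bundle
// boundary is pushed to the next boundary (the real helper also handles
// fragments that must end exactly at a boundary).
namespace {
uint64_t exampleBundlePadding(uint64_t Offset, uint64_t Size,
                              uint64_t BundleSize) {
  uint64_t OffsetInBundle = Offset & (BundleSize - 1);
  if (OffsetInBundle + Size <= BundleSize)
    return 0;                         // fits in the current bundle: no padding
  return BundleSize - OffsetInBundle; // pad up to the next bundle boundary
}
} // end anonymous namespace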

void MCAssembler::registerSymbol(const MCSymbol &Symbol, bool *Created) {
  bool New = !Symbol.isRegistered();
  if (Created)
    *Created = New;
  if (New) {
    Symbol.setIsRegistered(true);
    Symbols.push_back(&Symbol);
  }
}

void MCAssembler::writeFragmentPadding(raw_ostream &OS,
                                       const MCEncodedFragment &EF,
                                       uint64_t FSize) const {
  assert(getBackendPtr() && "Expected assembler backend");
  // Should NOP padding be written out before this fragment?
  unsigned BundlePadding = EF.getBundlePadding();
  if (BundlePadding > 0) {
    assert(isBundlingEnabled() &&
           "Writing bundle padding with disabled bundling");
    assert(EF.hasInstructions() &&
           "Writing bundle padding for a fragment without instructions");

    unsigned TotalLength = BundlePadding + static_cast<unsigned>(FSize);
    if (EF.alignToBundleEnd() && TotalLength > getBundleAlignSize()) {
      // If the padding itself crosses a bundle boundary, it must be emitted
      // in 2 pieces, since even nop instructions must not cross boundaries.
      //             v--------------v   <- BundleAlignSize
      //        v---------v             <- BundlePadding
      // ----------------------------
      // | Prev |####|####|    F    |
      // ----------------------------
      //        ^-------------------^   <- TotalLength
      unsigned DistanceToBoundary = TotalLength - getBundleAlignSize();
      if (!getBackend().writeNopData(OS, DistanceToBoundary))
        report_fatal_error("unable to write NOP sequence of " +
                           Twine(DistanceToBoundary) + " bytes");
      BundlePadding -= DistanceToBoundary;
    }
    if (!getBackend().writeNopData(OS, BundlePadding))
      report_fatal_error("unable to write NOP sequence of " +
                         Twine(BundlePadding) + " bytes");
  }
}
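// Worked example (illustrative only) of the two-piece padding above: with a
// 16-byte bundle, 10 bytes of padding and a 12-byte fragment aligned to the
// bundle end, TotalLength is 22 > 16, so DistanceToBoundary = 6 NOP bytes are
// written up to the boundary first and the remaining 4 afterwards; no single
// NOP run ever crosses a bundle boundary.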

/// Write the fragment \p F to the output file.
static void writeFragment(raw_ostream &OS, const MCAssembler &Asm,
                          const MCAsmLayout &Layout, const MCFragment &F) {
  // FIXME: Embed in fragments instead?
  uint64_t FragmentSize = Asm.computeFragmentSize(Layout, F);

  support::endianness Endian = Asm.getBackend().Endian;

  if (const MCEncodedFragment *EF = dyn_cast<MCEncodedFragment>(&F))
    Asm.writeFragmentPadding(OS, *EF, FragmentSize);

  // This variable (and its dummy usage) is to participate in the assert at
  // the end of the function.
  uint64_t Start = OS.tell();
  (void) Start;

  ++stats::EmittedFragments;

  switch (F.getKind()) {
  case MCFragment::FT_Align: {
    ++stats::EmittedAlignFragments;
    const MCAlignFragment &AF = cast<MCAlignFragment>(F);
    assert(AF.getValueSize() && "Invalid virtual align in concrete fragment!");

    uint64_t Count = FragmentSize / AF.getValueSize();

    // FIXME: This error shouldn't actually occur (the front end should emit
    // multiple .align directives to enforce the semantics it wants), but is
    // severe enough that we want to report it. How to handle this?
    if (Count * AF.getValueSize() != FragmentSize)
      report_fatal_error("undefined .align directive, value size '" +
                         Twine(AF.getValueSize()) +
                         "' is not a divisor of padding size '" +
                         Twine(FragmentSize) + "'");

    // See if we are aligning with nops, and if so do that first to try to
    // fill the Count bytes. Then if that did not fill any bytes or there are
    // any bytes left to fill use the Value and ValueSize to fill the rest.
    // If we are aligning with nops, ask the target to emit the right data.
    if (AF.hasEmitNops()) {
      if (!Asm.getBackend().writeNopData(OS, Count))
        report_fatal_error("unable to write nop sequence of " +
                           Twine(Count) + " bytes");
      break;
    }

    // Otherwise, write out in multiples of the value size.
    for (uint64_t i = 0; i != Count; ++i) {
      switch (AF.getValueSize()) {
      default: llvm_unreachable("Invalid size!");
      case 1: OS << char(AF.getValue()); break;
      case 2:
        support::endian::write<uint16_t>(OS, AF.getValue(), Endian);
        break;
      case 4:
        support::endian::write<uint32_t>(OS, AF.getValue(), Endian);
        break;
      case 8:
        support::endian::write<uint64_t>(OS, AF.getValue(), Endian);
        break;
      }
    }
    break;
  }

  case MCFragment::FT_Data:
    ++stats::EmittedDataFragments;
    OS << cast<MCDataFragment>(F).getContents();
    break;

  case MCFragment::FT_Relaxable:
    ++stats::EmittedRelaxableFragments;
    OS << cast<MCRelaxableFragment>(F).getContents();
    break;

  case MCFragment::FT_CompactEncodedInst:
    ++stats::EmittedCompactEncodedInstFragments;
    OS << cast<MCCompactEncodedInstFragment>(F).getContents();
    break;

  case MCFragment::FT_Fill: {
    ++stats::EmittedFillFragments;
    const MCFillFragment &FF = cast<MCFillFragment>(F);
    uint64_t V = FF.getValue();
    unsigned VSize = FF.getValueSize();
    const unsigned MaxChunkSize = 16;
    char Data[MaxChunkSize];
    assert(0 < VSize && VSize <= MaxChunkSize && "Illegal fragment fill size");
    // Duplicate V into Data as a byte vector to reduce the number of
    // writes done. As such, do endian conversion here.
    for (unsigned I = 0; I != VSize; ++I) {
      unsigned index = Endian == support::little ? I : (VSize - I - 1);
      Data[I] = uint8_t(V >> (index * 8));
    }
    for (unsigned I = VSize; I < MaxChunkSize; ++I)
      Data[I] = Data[I - VSize];

    // Set ChunkSize to the largest multiple of VSize that fits in Data.
    const unsigned NumPerChunk = MaxChunkSize / VSize;
    const unsigned ChunkSize = VSize * NumPerChunk;

    // Do copies by chunk.
    StringRef Ref(Data, ChunkSize);
    for (uint64_t I = 0, E = FragmentSize / ChunkSize; I != E; ++I)
      OS << Ref;

    // Do the remainder if needed.
    unsigned TrailingCount = FragmentSize % ChunkSize;
    if (TrailingCount)
      OS.write(Data, TrailingCount);
    break;
  }

  case MCFragment::FT_Nops: {
    ++stats::EmittedNopsFragments;
    const MCNopsFragment &NF = cast<MCNopsFragment>(F);
    int64_t NumBytes = NF.getNumBytes();
    int64_t ControlledNopLength = NF.getControlledNopLength();
    int64_t MaximumNopLength = Asm.getBackend().getMaximumNopSize();

    assert(NumBytes > 0 && "Expected positive NOPs fragment size");
    assert(ControlledNopLength >= 0 && "Expected non-negative NOP size");

    if (ControlledNopLength > MaximumNopLength) {
      Asm.getContext().reportError(NF.getLoc(),
                                   "illegal NOP size " +
                                       std::to_string(ControlledNopLength) +
                                       ". (expected within [0, " +
                                       std::to_string(MaximumNopLength) + "])");
      // Clamp the NOP length as reportError does not stop the execution
      // immediately.
      ControlledNopLength = MaximumNopLength;
    }

    // Use the maximum value if the size of each NOP is not specified.
    if (!ControlledNopLength)
      ControlledNopLength = MaximumNopLength;

    while (NumBytes) {
      uint64_t NumBytesToEmit =
          (uint64_t)std::min(NumBytes, ControlledNopLength);
      assert(NumBytesToEmit && "try to emit empty NOP instruction");
      if (!Asm.getBackend().writeNopData(OS, NumBytesToEmit)) {
        report_fatal_error("unable to write nop sequence of the remaining " +
                           Twine(NumBytesToEmit) + " bytes");
        break;
      }
      NumBytes -= NumBytesToEmit;
    }
    break;
  }

  case MCFragment::FT_LEB: {
    const MCLEBFragment &LF = cast<MCLEBFragment>(F);
    OS << LF.getContents();
    break;
  }

  case MCFragment::FT_BoundaryAlign: {
    if (!Asm.getBackend().writeNopData(OS, FragmentSize))
      report_fatal_error("unable to write nop sequence of " +
                         Twine(FragmentSize) + " bytes");
    break;
  }

  case MCFragment::FT_SymbolId: {
    const MCSymbolIdFragment &SF = cast<MCSymbolIdFragment>(F);
    support::endian::write<uint32_t>(OS, SF.getSymbol()->getIndex(), Endian);
    break;
  }

  case MCFragment::FT_Org: {
    ++stats::EmittedOrgFragments;
    const MCOrgFragment &OF = cast<MCOrgFragment>(F);

    for (uint64_t i = 0, e = FragmentSize; i != e; ++i)
      OS << char(OF.getValue());

    break;
  }

  case MCFragment::FT_Dwarf: {
    const MCDwarfLineAddrFragment &OF = cast<MCDwarfLineAddrFragment>(F);
    OS << OF.getContents();
    break;
  }
  case MCFragment::FT_DwarfFrame: {
    const MCDwarfCallFrameFragment &CF = cast<MCDwarfCallFrameFragment>(F);
    OS << CF.getContents();
    break;
  }
  case MCFragment::FT_CVInlineLines: {
    const auto &OF = cast<MCCVInlineLineTableFragment>(F);
    OS << OF.getContents();
    break;
  }
  case MCFragment::FT_CVDefRange: {
    const auto &DRF = cast<MCCVDefRangeFragment>(F);
    OS << DRF.getContents();
    break;
  }
  case MCFragment::FT_PseudoProbe: {
    const MCPseudoProbeAddrFragment &PF = cast<MCPseudoProbeAddrFragment>(F);
    OS << PF.getContents();
    break;
  }
  case MCFragment::FT_Dummy:
    llvm_unreachable("Should not have been added");
  }

  assert(OS.tell() - Start == FragmentSize &&
         "The stream should advance by fragment size");
}
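// Illustrative sketch (not part of the original file) of the FT_Fill chunking
// strategy above: the fill value is replicated into a small buffer once so
// the output is produced with a few large writes plus one tail write, rather
// than one write per value.
namespace {
void exampleWriteFill(raw_ostream &OS, const char *ValueBytes, unsigned VSize,
                      uint64_t TotalBytes) {
  char Buf[16];
  assert(VSize > 0 && VSize <= sizeof(Buf) && "illegal fill value size");
  for (unsigned I = 0; I != sizeof(Buf); ++I)
    Buf[I] = ValueBytes[I % VSize];                   // replicate the pattern
  const unsigned Chunk = (sizeof(Buf) / VSize) * VSize; // whole-value multiple
  for (uint64_t I = 0, E = TotalBytes / Chunk; I != E; ++I)
    OS.write(Buf, Chunk);
  OS.write(Buf, TotalBytes % Chunk);                  // tail keeps the pattern
}
} // end anonymous namespace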

void MCAssembler::writeSectionData(raw_ostream &OS, const MCSection *Sec,
                                   const MCAsmLayout &Layout) const {
  assert(getBackendPtr() && "Expected assembler backend");

  // Ignore virtual sections.
  if (Sec->isVirtualSection()) {
    assert(Layout.getSectionFileSize(Sec) == 0 && "Invalid size for section!");

    // Check that contents are only things legal inside a virtual section.
    for (const MCFragment &F : *Sec) {
      switch (F.getKind()) {
      default: llvm_unreachable("Invalid fragment in virtual section!");
      case MCFragment::FT_Data: {
        // Check that we aren't trying to write a non-zero contents (or fixups)
        // into a virtual section. This is to support clients which use standard
        // directives to fill the contents of virtual sections.
        const MCDataFragment &DF = cast<MCDataFragment>(F);
        if (DF.fixup_begin() != DF.fixup_end())
          getContext().reportError(SMLoc(), Sec->getVirtualSectionKind() +
                                                " section '" + Sec->getName() +
                                                "' cannot have fixups");
        for (unsigned i = 0, e = DF.getContents().size(); i != e; ++i)
          if (DF.getContents()[i]) {
            getContext().reportError(SMLoc(),
                                     Sec->getVirtualSectionKind() +
                                         " section '" + Sec->getName() +
                                         "' cannot have non-zero initializers");
            break;
          }
        break;
      }
      case MCFragment::FT_Align:
        // Check that we aren't trying to write a non-zero value into a virtual
        // section.
        assert((cast<MCAlignFragment>(F).getValueSize() == 0 ||
                cast<MCAlignFragment>(F).getValue() == 0) &&
               "Invalid align in virtual section!");
        break;
      case MCFragment::FT_Fill:
        assert((cast<MCFillFragment>(F).getValue() == 0) &&
               "Invalid fill in virtual section!");
        break;
      case MCFragment::FT_Org:
        break;
      }
    }

    return;
  }

  uint64_t Start = OS.tell();
  (void)Start;

  for (const MCFragment &F : *Sec)
    writeFragment(OS, *this, Layout, F);

  assert(getContext().hadError() ||
         OS.tell() - Start == Layout.getSectionAddressSize(Sec));
}

std::tuple<MCValue, uint64_t, bool>
MCAssembler::handleFixup(const MCAsmLayout &Layout, MCFragment &F,
                         const MCFixup &Fixup) {
  // Evaluate the fixup.
  MCValue Target;
  uint64_t FixedValue;
  bool WasForced;
  bool IsResolved = evaluateFixup(Layout, Fixup, &F, Target, FixedValue,
                                  WasForced);
  if (!IsResolved) {
    // The fixup was unresolved, we need a relocation. Inform the object
    // writer of the relocation, and give it an opportunity to adjust the
    // fixup value if need be.
    getWriter().recordRelocation(*this, Layout, &F, Fixup, Target, FixedValue);
  }
  return std::make_tuple(Target, FixedValue, IsResolved);
}

void MCAssembler::layout(MCAsmLayout &Layout) {
  assert(getBackendPtr() && "Expected assembler backend");
  DEBUG_WITH_TYPE("mc-dump", {
      errs() << "assembler backend - pre-layout\n--\n";
      dump(); });

  // Create dummy fragments and assign section ordinals.
  unsigned SectionIndex = 0;
  for (MCSection &Sec : *this) {
    // Create dummy fragments to eliminate any empty sections, this simplifies
    // layout.
    if (Sec.getFragmentList().empty())
      new MCDataFragment(&Sec);

    Sec.setOrdinal(SectionIndex++);
  }

  // Assign layout order indices to sections and fragments.
  for (unsigned i = 0, e = Layout.getSectionOrder().size(); i != e; ++i) {
    MCSection *Sec = Layout.getSectionOrder()[i];
    Sec->setLayoutOrder(i);

    unsigned FragmentIndex = 0;
    for (MCFragment &Frag : *Sec)
      Frag.setLayoutOrder(FragmentIndex++);
  }

  // Layout until everything fits.
  while (layoutOnce(Layout)) {
    if (getContext().hadError())
      return;
    // Size of fragments in one section can depend on the size of fragments in
    // another. If any fragment has changed size, we have to re-layout (and
    // as a result possibly further relax) all.
    for (MCSection &Sec : *this)
      Layout.invalidateFragmentsFrom(&*Sec.begin());
  }

  DEBUG_WITH_TYPE("mc-dump", {
      errs() << "assembler backend - post-relaxation\n--\n";
      dump(); });

  // Finalize the layout, including fragment lowering.
  finishLayout(Layout);

  DEBUG_WITH_TYPE("mc-dump", {
      errs() << "assembler backend - final-layout\n--\n";
      dump(); });

  // Allow the object writer a chance to perform post-layout binding (for
  // example, to set the index fields in the symbol data).
  getWriter().executePostLayoutBinding(*this, Layout);

  // Evaluate and apply the fixups, generating relocation entries as necessary.
  for (MCSection &Sec : *this) {
    for (MCFragment &Frag : Sec) {
      ArrayRef<MCFixup> Fixups;
      MutableArrayRef<char> Contents;
      const MCSubtargetInfo *STI = nullptr;

      // Process MCAlignFragment and MCEncodedFragmentWithFixups here.
      switch (Frag.getKind()) {
      default:
        continue;
      case MCFragment::FT_Align: {
        MCAlignFragment &AF = cast<MCAlignFragment>(Frag);
        // Insert a fixup for code alignment if the target defines the
        // shouldInsertFixupForCodeAlign target hook.
        if (Sec.UseCodeAlign() && AF.hasEmitNops())
          getBackend().shouldInsertFixupForCodeAlign(*this, Layout, AF);
        continue;
      }
      case MCFragment::FT_Data: {
        MCDataFragment &DF = cast<MCDataFragment>(Frag);
        Fixups = DF.getFixups();
        Contents = DF.getContents();
        STI = DF.getSubtargetInfo();
        assert(!DF.hasInstructions() || STI != nullptr);
        break;
      }
      case MCFragment::FT_Relaxable: {
        MCRelaxableFragment &RF = cast<MCRelaxableFragment>(Frag);
        Fixups = RF.getFixups();
        Contents = RF.getContents();
        STI = RF.getSubtargetInfo();
        assert(!RF.hasInstructions() || STI != nullptr);
        break;
      }
      case MCFragment::FT_CVDefRange: {
        MCCVDefRangeFragment &CF = cast<MCCVDefRangeFragment>(Frag);
        Fixups = CF.getFixups();
        Contents = CF.getContents();
        break;
      }
      case MCFragment::FT_Dwarf: {
        MCDwarfLineAddrFragment &DF = cast<MCDwarfLineAddrFragment>(Frag);
        Fixups = DF.getFixups();
        Contents = DF.getContents();
        break;
      }
      case MCFragment::FT_DwarfFrame: {
        MCDwarfCallFrameFragment &DF = cast<MCDwarfCallFrameFragment>(Frag);
        Fixups = DF.getFixups();
        Contents = DF.getContents();
        break;
      }
      case MCFragment::FT_PseudoProbe: {
        MCPseudoProbeAddrFragment &PF = cast<MCPseudoProbeAddrFragment>(Frag);
        Fixups = PF.getFixups();
        Contents = PF.getContents();
        break;
      }
      }
      for (const MCFixup &Fixup : Fixups) {
        uint64_t FixedValue;
        bool IsResolved;
        MCValue Target;
        std::tie(Target, FixedValue, IsResolved) =
            handleFixup(Layout, Frag, Fixup);
        getBackend().applyFixup(*this, Fixup, Target, Contents, FixedValue,
                                IsResolved, STI);
      }
    }
  }
}
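// Illustrative sketch (not part of the original file) of the fixed-point
// iteration in layout() above: relaxation only ever grows fragments, so
// looping until no fragment changes size is guaranteed to terminate. The
// container of sizes and the RelaxOne callback are hypothetical stand-ins
// for sections/fragments and relaxFragment().
namespace {
unsigned exampleLayoutUntilFixedPoint(SmallVectorImpl<uint64_t> &FragSizes,
                                      bool (*RelaxOne)(uint64_t &Size)) {
  unsigned Steps = 0;
  bool Changed;
  do {
    ++Steps;
    Changed = false;
    for (uint64_t &Size : FragSizes)
      Changed |= RelaxOne(Size); // may grow Size, never shrinks it
    // In the real assembler every section's offsets are invalidated and
    // recomputed here before the next pass.
  } while (Changed);
  return Steps;
}
} // end anonymous namespace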

void MCAssembler::Finish() {
  // Create the layout object.
  MCAsmLayout Layout(*this);
  layout(Layout);

  // Write the object file.
  stats::ObjectBytes += getWriter().writeObject(*this, Layout);
}

bool MCAssembler::fixupNeedsRelaxation(const MCFixup &Fixup,
                                       const MCRelaxableFragment *DF,
                                       const MCAsmLayout &Layout) const {
  assert(getBackendPtr() && "Expected assembler backend");
  MCValue Target;
  uint64_t Value;
  bool WasForced;
  bool Resolved = evaluateFixup(Layout, Fixup, DF, Target, Value, WasForced);
  if (Target.getSymA() &&
      Target.getSymA()->getKind() == MCSymbolRefExpr::VK_X86_ABS8 &&
      Fixup.getKind() == FK_Data_1)
    return false;
  return getBackend().fixupNeedsRelaxationAdvanced(Fixup, Resolved, Value, DF,
                                                   Layout, WasForced);
}

bool MCAssembler::fragmentNeedsRelaxation(const MCRelaxableFragment *F,
                                          const MCAsmLayout &Layout) const {
  assert(getBackendPtr() && "Expected assembler backend");
  // If this inst doesn't ever need relaxation, ignore it. This occurs when we
  // are intentionally pushing out inst fragments, or because we relaxed a
  // previous instruction to one that doesn't need relaxation.
  if (!getBackend().mayNeedRelaxation(F->getInst(), *F->getSubtargetInfo()))
    return false;

  for (const MCFixup &Fixup : F->getFixups())
    if (fixupNeedsRelaxation(Fixup, F, Layout))
      return true;

  return false;
}

bool MCAssembler::relaxInstruction(MCAsmLayout &Layout,
                                   MCRelaxableFragment &F) {
  assert(getEmitterPtr() &&
         "Expected CodeEmitter defined for relaxInstruction");
  if (!fragmentNeedsRelaxation(&F, Layout))
    return false;

  ++stats::RelaxedInstructions;

  // FIXME-PERF: We could immediately lower out instructions if we can tell
  // they are fully resolved, to avoid retesting on later passes.

  // Relax the fragment.

  MCInst Relaxed = F.getInst();
  getBackend().relaxInstruction(Relaxed, *F.getSubtargetInfo());

  // Encode the new instruction.
  //
  // FIXME-PERF: If it matters, we could let the target do this. It can
  // probably do so more efficiently in many cases.
  SmallVector<MCFixup, 4> Fixups;
  SmallString<256> Code;
  raw_svector_ostream VecOS(Code);
  getEmitter().encodeInstruction(Relaxed, VecOS, Fixups, *F.getSubtargetInfo());

  // Update the fragment.
  F.setInst(Relaxed);
  F.getContents() = Code;
  F.getFixups() = Fixups;

  return true;
}

bool MCAssembler::relaxLEB(MCAsmLayout &Layout, MCLEBFragment &LF) {
  uint64_t OldSize = LF.getContents().size();
  int64_t Value;
  bool Abs = LF.getValue().evaluateKnownAbsolute(Value, Layout);
  if (!Abs)
    report_fatal_error("sleb128 and uleb128 expressions must be absolute");
  SmallString<8> &Data = LF.getContents();
  Data.clear();
  raw_svector_ostream OSE(Data);
  // The compiler can generate EH table assembly that is impossible to assemble
  // without either adding padding to an LEB fragment or adding extra padding
  // to a later alignment fragment. To accommodate such tables, relaxation can
  // only increase an LEB fragment size here, not decrease it. See PR35809.
  if (LF.isSigned())
    encodeSLEB128(Value, OSE, OldSize);
  else
    encodeULEB128(Value, OSE, OldSize);
  return OldSize != LF.getContents().size();
}
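// Worked example (illustrative only) of the padded LEB128 encoding used in
// relaxLEB() above: passing the old size as PadTo keeps the fragment from
// shrinking. Encoding the value 1 normally takes one byte (0x01); padded to
// two bytes it becomes 0x81 0x00, which decodes to the same value.
namespace {
void exampleULEB128Padding(raw_ostream &OS) {
  encodeULEB128(1, OS);              // minimal form: 0x01
  encodeULEB128(1, OS, /*PadTo=*/2); // padded form:  0x81 0x00
}
} // end anonymous namespace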

/// Check if the branch crosses the boundary.
///
/// \param StartAddr start address of the fused/unfused branch.
/// \param Size size of the fused/unfused branch.
/// \param BoundaryAlignment alignment requirement of the branch.
/// \returns true if the branch crosses the boundary.
static bool mayCrossBoundary(uint64_t StartAddr, uint64_t Size,
                             Align BoundaryAlignment) {
  uint64_t EndAddr = StartAddr + Size;
  return (StartAddr >> Log2(BoundaryAlignment)) !=
         ((EndAddr - 1) >> Log2(BoundaryAlignment));
}

/// Check if the branch is against the boundary.
///
/// \param StartAddr start address of the fused/unfused branch.
/// \param Size size of the fused/unfused branch.
/// \param BoundaryAlignment alignment requirement of the branch.
/// \returns true if the branch is against the boundary, i.e. its last byte
/// ends exactly at a boundary.
static bool isAgainstBoundary(uint64_t StartAddr, uint64_t Size,
                              Align BoundaryAlignment) {
  uint64_t EndAddr = StartAddr + Size;
  return (EndAddr & (BoundaryAlignment.value() - 1)) == 0;
}

/// Check if the branch needs padding.
///
/// \param StartAddr start address of the fused/unfused branch.
/// \param Size size of the fused/unfused branch.
/// \param BoundaryAlignment alignment requirement of the branch.
/// \returns true if the branch needs padding.
static bool needPadding(uint64_t StartAddr, uint64_t Size,
                        Align BoundaryAlignment) {
  return mayCrossBoundary(StartAddr, Size, BoundaryAlignment) ||
         isAgainstBoundary(StartAddr, Size, BoundaryAlignment);
}
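// Worked example (illustrative only) for the three predicates above with a
// 32-byte boundary, as used by the x86 branch-alignment mitigation: a 6-byte
// branch at 0x1e spans the 0x20 boundary, and a 6-byte branch at 0x1a ends
// exactly on it; both cases require padding.
namespace {
void exampleBoundaryChecks() {
  const Align Boundary(32);
  assert(mayCrossBoundary(0x1e, 6, Boundary));  // bytes 0x1e..0x23 straddle 0x20
  assert(!mayCrossBoundary(0x1a, 6, Boundary)); // bytes 0x1a..0x1f do not
  assert(isAgainstBoundary(0x1a, 6, Boundary)); // next byte would be 0x20
  assert(needPadding(0x1e, 6, Boundary) && needPadding(0x1a, 6, Boundary));
}
} // end anonymous namespace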
| 1057 | ||||
| 1058 | bool MCAssembler::relaxBoundaryAlign(MCAsmLayout &Layout, | |||
| 1059 | MCBoundaryAlignFragment &BF) { | |||
| 1060 | // BoundaryAlignFragment that doesn't need to align any fragment should not be | |||
| 1061 | // relaxed. | |||
| 1062 | if (!BF.getLastFragment()) | |||
| 1063 | return false; | |||
| 1064 | ||||
| 1065 | uint64_t AlignedOffset = Layout.getFragmentOffset(&BF); | |||
| 1066 | uint64_t AlignedSize = 0; | |||
| 1067 | for (const MCFragment *F = BF.getLastFragment(); F != &BF; | |||
| 1068 | F = F->getPrevNode()) | |||
| 1069 | AlignedSize += computeFragmentSize(Layout, *F); | |||
| 1070 | ||||
| 1071 | Align BoundaryAlignment = BF.getAlignment(); | |||
| 1072 | uint64_t NewSize = needPadding(AlignedOffset, AlignedSize, BoundaryAlignment) | |||
| 1073 | ? offsetToAlignment(AlignedOffset, BoundaryAlignment) | |||
| 1074 | : 0U; | |||
| 1075 | if (NewSize == BF.getSize()) | |||
| 1076 | return false; | |||
| 1077 | BF.setSize(NewSize); | |||
| 1078 | Layout.invalidateFragmentsFrom(&BF); | |||
| 1079 | return true; | |||
| 1080 | } | |||
| 1081 | ||||
| 1082 | bool MCAssembler::relaxDwarfLineAddr(MCAsmLayout &Layout, | |||
| 1083 | MCDwarfLineAddrFragment &DF) { | |||
| 1084 | ||||
| 1085 | bool WasRelaxed; | |||
| 1086 | if (getBackend().relaxDwarfLineAddr(DF, Layout, WasRelaxed)) | |||
| 1087 | return WasRelaxed; | |||
| 1088 | ||||
| 1089 | MCContext &Context = Layout.getAssembler().getContext(); | |||
| 1090 | uint64_t OldSize = DF.getContents().size(); | |||
| 1091 | int64_t AddrDelta; | |||
| 1092 | bool Abs = DF.getAddrDelta().evaluateKnownAbsolute(AddrDelta, Layout); | |||
| 1093 | assert(Abs && "We created a line delta with an invalid expression")((void)0); | |||
| 1094 | (void)Abs; | |||
| 1095 | int64_t LineDelta; | |||
| 1096 | LineDelta = DF.getLineDelta(); | |||
| 1097 | SmallVectorImpl<char> &Data = DF.getContents(); | |||
| 1098 | Data.clear(); | |||
| 1099 | raw_svector_ostream OSE(Data); | |||
| 1100 | DF.getFixups().clear(); | |||
| 1101 | ||||
| 1102 | MCDwarfLineAddr::Encode(Context, getDWARFLinetableParams(), LineDelta, | |||
| 1103 | AddrDelta, OSE); | |||
| 1104 | return OldSize != Data.size(); | |||
| 1105 | } | |||
| 1106 | ||||
| 1107 | bool MCAssembler::relaxDwarfCallFrameFragment(MCAsmLayout &Layout, | |||
| 1108 | MCDwarfCallFrameFragment &DF) { | |||
| 1109 | bool WasRelaxed; | |||
| 1110 | if (getBackend().relaxDwarfCFA(DF, Layout, WasRelaxed)) | |||
| 1111 | return WasRelaxed; | |||
| 1112 | ||||
| 1113 | MCContext &Context = Layout.getAssembler().getContext(); | |||
| 1114 | uint64_t OldSize = DF.getContents().size(); | |||
| 1115 | int64_t AddrDelta; | |||
| 1116 | bool Abs = DF.getAddrDelta().evaluateKnownAbsolute(AddrDelta, Layout); | |||
| 1117 | assert(Abs && "We created call frame with an invalid expression")((void)0); | |||
| 1118 | (void) Abs; | |||
| 1119 | SmallVectorImpl<char> &Data = DF.getContents(); | |||
| 1120 | Data.clear(); | |||
| 1121 | raw_svector_ostream OSE(Data); | |||
| 1122 | DF.getFixups().clear(); | |||
| 1123 | ||||
| 1124 | MCDwarfFrameEmitter::EncodeAdvanceLoc(Context, AddrDelta, OSE); | |||
| 1125 | return OldSize != Data.size(); | |||
| 1126 | } | |||
| 1127 | ||||
| 1128 | bool MCAssembler::relaxCVInlineLineTable(MCAsmLayout &Layout, | |||
| 1129 | MCCVInlineLineTableFragment &F) { | |||
| 1130 | unsigned OldSize = F.getContents().size(); | |||
| 1131 | getContext().getCVContext().encodeInlineLineTable(Layout, F); | |||
| 1132 | return OldSize != F.getContents().size(); | |||
| 1133 | } | |||
| 1134 | ||||
| 1135 | bool MCAssembler::relaxCVDefRange(MCAsmLayout &Layout, | |||
| 1136 | MCCVDefRangeFragment &F) { | |||
| 1137 | unsigned OldSize = F.getContents().size(); | |||
| 1138 | getContext().getCVContext().encodeDefRange(Layout, F); | |||
| 1139 | return OldSize != F.getContents().size(); | |||
| 1140 | } | |||
| 1141 | ||||
| 1142 | bool MCAssembler::relaxPseudoProbeAddr(MCAsmLayout &Layout, | |||
| 1143 | MCPseudoProbeAddrFragment &PF) { | |||
| 1144 | uint64_t OldSize = PF.getContents().size(); | |||
| 1145 | int64_t AddrDelta; | |||
| 1146 | bool Abs = PF.getAddrDelta().evaluateKnownAbsolute(AddrDelta, Layout); | |||
| 1147 | assert(Abs && "We created a pseudo probe with an invalid expression")((void)0); | |||
| 1148 | (void)Abs; | |||
| 1149 | SmallVectorImpl<char> &Data = PF.getContents(); | |||
| 1150 | Data.clear(); | |||
| 1151 | raw_svector_ostream OSE(Data); | |||
| 1152 | PF.getFixups().clear(); | |||
| 1153 | ||||
| 1154 | // AddrDelta is a signed integer | |||
| 1155 | encodeSLEB128(AddrDelta, OSE, OldSize); | |||
| 1156 | return OldSize != Data.size(); | |||
| 1157 | } | |||
| 1158 | ||||
| 1159 | bool MCAssembler::relaxFragment(MCAsmLayout &Layout, MCFragment &F) { | |||
| 1160 | switch(F.getKind()) { | |||
| 1161 | default: | |||
| 1162 | return false; | |||
| 1163 | case MCFragment::FT_Relaxable: | |||
| 1164 | assert(!getRelaxAll() &&((void)0) | |||
| 1165 | "Did not expect a MCRelaxableFragment in RelaxAll mode")((void)0); | |||
| 1166 | return relaxInstruction(Layout, cast<MCRelaxableFragment>(F)); | |||
| 1167 | case MCFragment::FT_Dwarf: | |||
| 1168 | return relaxDwarfLineAddr(Layout, cast<MCDwarfLineAddrFragment>(F)); | |||
| 1169 | case MCFragment::FT_DwarfFrame: | |||
| 1170 | return relaxDwarfCallFrameFragment(Layout, | |||
| 1171 | cast<MCDwarfCallFrameFragment>(F)); | |||
| 1172 | case MCFragment::FT_LEB: | |||
| 1173 | return relaxLEB(Layout, cast<MCLEBFragment>(F)); | |||
| 1174 | case MCFragment::FT_BoundaryAlign: | |||
| 1175 | return relaxBoundaryAlign(Layout, cast<MCBoundaryAlignFragment>(F)); | |||
| 1176 | case MCFragment::FT_CVInlineLines: | |||
| 1177 | return relaxCVInlineLineTable(Layout, cast<MCCVInlineLineTableFragment>(F)); | |||
| 1178 | case MCFragment::FT_CVDefRange: | |||
| 1179 | return relaxCVDefRange(Layout, cast<MCCVDefRangeFragment>(F)); | |||
| 1180 | case MCFragment::FT_PseudoProbe: | |||
| 1181 | return relaxPseudoProbeAddr(Layout, cast<MCPseudoProbeAddrFragment>(F)); | |||
| 1182 | } | |||
| 1183 | } | |||
| 1184 | ||||
| 1185 | bool MCAssembler::layoutSectionOnce(MCAsmLayout &Layout, MCSection &Sec) { | |||
| 1186 | // Holds the first fragment that needed relaxing during this layout; it | |||
| 1187 | // remains null if none were relaxed. | |||
| 1188 | // When a fragment is relaxed, all fragments following it must be | |||
| 1189 | // invalidated because their offsets are going to change. | |||
| 1190 | MCFragment *FirstRelaxedFragment = nullptr; | |||
| 1191 | ||||
| 1192 | // Attempt to relax all the fragments in the section. | |||
| 1193 | for (MCFragment &Frag : Sec) { | |||
| 1194 | // Check if this is a fragment that needs relaxation. | |||
| 1195 | bool RelaxedFrag = relaxFragment(Layout, Frag); | |||
| 1196 | if (RelaxedFrag && !FirstRelaxedFragment) | |||
| 1197 | FirstRelaxedFragment = &Frag; | |||
| 1198 | } | |||
| 1199 | if (FirstRelaxedFragment) { | |||
| 1200 | Layout.invalidateFragmentsFrom(FirstRelaxedFragment); | |||
| 1201 | return true; | |||
| 1202 | } | |||
| 1203 | return false; | |||
| 1204 | } | |||
| 1205 | ||||
| 1206 | bool MCAssembler::layoutOnce(MCAsmLayout &Layout) { | |||
| 1207 | ++stats::RelaxationSteps; | |||
| 1208 | ||||
| 1209 | bool WasRelaxed = false; | |||
| 1210 | for (MCSection &Sec : *this) { | |||
| 1211 | while (layoutSectionOnce(Layout, Sec)) | |||
| 1212 | WasRelaxed = true; | |||
| 1213 | } | |||
| 1214 | ||||
| 1215 | return WasRelaxed; | |||
| 1216 | } | |||
| 1217 | ||||
| 1218 | void MCAssembler::finishLayout(MCAsmLayout &Layout) { | |||
| 1219 | assert(getBackendPtr() && "Expected assembler backend"); | |||
| 1220 | // The layout is done. Mark every fragment as valid. | |||
| 1221 | for (unsigned int i = 0, n = Layout.getSectionOrder().size(); i != n; ++i) { | |||
| 1222 | MCSection &Section = *Layout.getSectionOrder()[i]; | |||
| 1223 | Layout.getFragmentOffset(&*Section.getFragmentList().rbegin()); | |||
| 1224 | computeFragmentSize(Layout, *Section.getFragmentList().rbegin()); | |||
| 1225 | } | |||
| 1226 | getBackend().finishLayout(*this, Layout); | |||
| 1227 | } | |||
| 1228 | ||||
| 1229 | #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) | |||
| 1230 | LLVM_DUMP_METHOD void MCAssembler::dump() const { | |||
| 1231 | raw_ostream &OS = errs(); | |||
| 1232 | ||||
| 1233 | OS << "<MCAssembler\n"; | |||
| 1234 | OS << " Sections:[\n "; | |||
| 1235 | for (const_iterator it = begin(), ie = end(); it != ie; ++it) { | |||
| 1236 | if (it != begin()) OS << ",\n "; | |||
| 1237 | it->dump(); | |||
| 1238 | } | |||
| 1239 | OS << "],\n"; | |||
| 1240 | OS << " Symbols:["; | |||
| 1241 | ||||
| 1242 | for (const_symbol_iterator it = symbol_begin(), ie = symbol_end(); it != ie; ++it) { | |||
| 1243 | if (it != symbol_begin()) OS << ",\n "; | |||
| 1244 | OS << "("; | |||
| 1245 | it->dump(); | |||
| 1246 | OS << ", Index:" << it->getIndex() << ", "; | |||
| 1247 | OS << ")"; | |||
| 1248 | } | |||
| 1249 | OS << "]>\n"; | |||
| 1250 | } | |||
| 1251 | #endif |
| 1 | //===-- llvm/Support/Alignment.h - Useful alignment functions ---*- C++ -*-===// | |||
| 2 | // | |||
| 3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. | |||
| 4 | // See https://llvm.org/LICENSE.txt for license information. | |||
| 5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception | |||
| 6 | // | |||
| 7 | //===----------------------------------------------------------------------===// | |||
| 8 | // | |||
| 9 | // This file contains types to represent alignments. | |||
| 10 | // They are instrumented to guarantee some invariants are preserved and prevent | |||
| 11 | // invalid manipulations. | |||
| 12 | // | |||
| 13 | // - Align represents an alignment in bytes; it is always set and always a valid | |||
| 14 | // power of two. Its minimum value is 1, which means no alignment requirement. | |||
| 15 | // | |||
| 16 | // - MaybeAlign is an optional type; it may be undefined or set. When it is set, | |||
| 17 | // you can get the underlying Align type by using the getValue() method. | |||
| 18 | // | |||
| 19 | //===----------------------------------------------------------------------===// | |||
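A minimal usage sketch (illustration only, not part of this header):

llvm::Align A(16);                 // OK: 16 is a power of two.
uint64_t Bytes = A.value();        // 16
llvm::MaybeAlign MA(0);            // Undefined alignment, holds no Align.
llvm::Align B = MA.valueOrOne();   // Falls back to Align(1).
// llvm::Align Bad(3);             // Would assert: 3 is not a power of two.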
| 20 | ||||
| 21 | #ifndef LLVM_SUPPORT_ALIGNMENT_H_ | |||
| 22 | #define LLVM_SUPPORT_ALIGNMENT_H_ | |||
| 23 | ||||
| 24 | #include "llvm/ADT/Optional.h" | |||
| 25 | #include "llvm/Support/MathExtras.h" | |||
| 26 | #include <cassert> | |||
| 27 | #ifndef NDEBUG | |||
| 28 | #include <string> | |||
| 29 | #endif // NDEBUG | |||
| 30 | ||||
| 31 | namespace llvm { | |||
| 32 | ||||
| 33 | #define ALIGN_CHECK_ISPOSITIVE(decl) \ | |||
| 34 | assert(decl > 0 && (#decl " should be defined")) | |||
| 35 | ||||
| 36 | /// This struct is a compact representation of a valid (non-zero power of two) | |||
| 37 | /// alignment. | |||
| 38 | /// It is suitable for use as static global constants. | |||
| 39 | struct Align { | |||
| 40 | private: | |||
| 41 | uint8_t ShiftValue = 0; /// The log2 of the required alignment. | |||
| 42 | /// ShiftValue is less than 64 by construction. | |||
| 43 | ||||
| 44 | friend struct MaybeAlign; | |||
| 45 | friend unsigned Log2(Align); | |||
| 46 | friend bool operator==(Align Lhs, Align Rhs); | |||
| 47 | friend bool operator!=(Align Lhs, Align Rhs); | |||
| 48 | friend bool operator<=(Align Lhs, Align Rhs); | |||
| 49 | friend bool operator>=(Align Lhs, Align Rhs); | |||
| 50 | friend bool operator<(Align Lhs, Align Rhs); | |||
| 51 | friend bool operator>(Align Lhs, Align Rhs); | |||
| 52 | friend unsigned encode(struct MaybeAlign A); | |||
| 53 | friend struct MaybeAlign decodeMaybeAlign(unsigned Value); | |||
| 54 | ||||
| 55 | /// A trivial type to allow construction of constexpr Align. | |||
| 56 | /// This is currently needed to work around a bug in GCC 5.3 which prevents | |||
| 57 | /// definition of constexpr assign operators. | |||
| 58 | /// https://stackoverflow.com/questions/46756288/explicitly-defaulted-function-cannot-be-declared-as-constexpr-because-the-implic | |||
| 59 | /// FIXME: Remove this, make all assign operators constexpr and introduce user | |||
| 60 | /// defined literals when we don't have to support GCC 5.3 anymore. | |||
| 61 | /// https://llvm.org/docs/GettingStarted.html#getting-a-modern-host-c-toolchain | |||
| 62 | struct LogValue { | |||
| 63 | uint8_t Log; | |||
| 64 | }; | |||
| 65 | ||||
| 66 | public: | |||
| 67 | /// Default is byte-aligned. | |||
| 68 | constexpr Align() = default; | |||
| 69 | /// Do not perform checks in case of copy/move construct/assign, because the | |||
| 70 | /// checks have been performed when building `Other`. | |||
| 71 | constexpr Align(const Align &Other) = default; | |||
| 72 | constexpr Align(Align &&Other) = default; | |||
| 73 | Align &operator=(const Align &Other) = default; | |||
| 74 | Align &operator=(Align &&Other) = default; | |||
| 75 | ||||
| 76 | explicit Align(uint64_t Value) { | |||
| 77 | assert(Value > 0 && "Value must not be 0"); | |||
| 78 | assert(llvm::isPowerOf2_64(Value) && "Alignment is not a power of 2"); | |||
| 79 | ShiftValue = Log2_64(Value); | |||
| 80 | assert(ShiftValue < 64 && "Broken invariant"); | |||
| 81 | } | |||
| 82 | ||||
| 83 | /// This is a hole in the type system and should not be abused. | |||
| 84 | /// Needed to interact with C, for instance. | |||
| 85 | uint64_t value() const { return uint64_t(1) << ShiftValue; } | |||
| 86 | ||||
| 87 | /// Allow constructions of constexpr Align. | |||
| 88 | template <size_t kValue> constexpr static LogValue Constant() { | |||
| 89 | return LogValue{static_cast<uint8_t>(CTLog2<kValue>())}; | |||
| 90 | } | |||
| 91 | ||||
| 92 | /// Allow constructions of constexpr Align from types. | |||
| 93 | /// Compile time equivalent to Align(alignof(T)). | |||
| 94 | template <typename T> constexpr static LogValue Of() { | |||
| 95 | return Constant<std::alignment_of<T>::value>(); | |||
| 96 | } | |||
| 97 | ||||
| 98 | /// Constexpr constructor from LogValue type. | |||
| 99 | constexpr Align(LogValue CA) : ShiftValue(CA.Log) {} | |||
| 100 | }; | |||
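A sketch of the LogValue escape hatch in use (illustration only; the variable names are invented for this example):

constexpr llvm::Align kWordAlign = llvm::Align::Constant<8>();   // log2(8) == 3
constexpr llvm::Align kDoubleAlign = llvm::Align::Of<double>();  // alignof(double)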
| 101 | ||||
| 102 | /// Treats the value 0 as a 1, so Align is always at least 1. | |||
| 103 | inline Align assumeAligned(uint64_t Value) { | |||
| 104 | return Value ? Align(Value) : Align(); | |||
| 105 | } | |||
| 106 | ||||
| 107 | /// This struct is a compact representation of a valid (power of two) or | |||
| 108 | /// undefined (0) alignment. | |||
| 109 | struct MaybeAlign : public llvm::Optional<Align> { | |||
| 110 | private: | |||
| 111 | using UP = llvm::Optional<Align>; | |||
| 112 | ||||
| 113 | public: | |||
| 114 | /// Default is undefined. | |||
| 115 | MaybeAlign() = default; | |||
| 116 | /// Do not perform checks in case of copy/move construct/assign, because the | |||
| 117 | /// checks have been performed when building `Other`. | |||
| 118 | MaybeAlign(const MaybeAlign &Other) = default; | |||
| 119 | MaybeAlign &operator=(const MaybeAlign &Other) = default; | |||
| 120 | MaybeAlign(MaybeAlign &&Other) = default; | |||
| 121 | MaybeAlign &operator=(MaybeAlign &&Other) = default; | |||
| 122 | ||||
| 123 | /// Use llvm::Optional<Align> constructor. | |||
| 124 | using UP::UP; | |||
| 125 | ||||
| 126 | explicit MaybeAlign(uint64_t Value) { | |||
| 127 | assert((Value == 0 || llvm::isPowerOf2_64(Value)) && | |||
| 128 | "Alignment is neither 0 nor a power of 2"); | |||
| 129 | if (Value) | |||
| 130 | emplace(Value); | |||
| 131 | } | |||
| 132 | ||||
| 133 | /// For convenience, returns a valid alignment or 1 if undefined. | |||
| 134 | Align valueOrOne() const { return hasValue() ? getValue() : Align(); } | |||
| 135 | }; | |||
| 136 | ||||
| 137 | /// Checks that SizeInBytes is a multiple of the alignment. | |||
| 138 | inline bool isAligned(Align Lhs, uint64_t SizeInBytes) { | |||
| 139 | return SizeInBytes % Lhs.value() == 0; | |||
| 140 | } | |||
| 141 | ||||
| 142 | /// Checks that Addr is a multiple of the alignment. | |||
| 143 | inline bool isAddrAligned(Align Lhs, const void *Addr) { | |||
| 144 | return isAligned(Lhs, reinterpret_cast<uintptr_t>(Addr)); | |||
| 145 | } | |||
| 146 | ||||
| 147 | /// Returns a multiple of A needed to store `Size` bytes. | |||
| 148 | inline uint64_t alignTo(uint64_t Size, Align A) { | |||
| 149 | const uint64_t Value = A.value(); | |||
| 150 | // The following line is equivalent to `(Size + Value - 1) / Value * Value`. | |||
| 151 | ||||
| 152 | // The division followed by a multiplication can be thought of as a right | |||
| 153 | // shift followed by a left shift which zeros out the extra bits produced in | |||
| 154 | // the bump; `~(Value - 1)` is a mask where all those bits being zeroed out | |||
| 155 | // are just zero. | |||
| 156 | ||||
| 157 | // Most compilers can generate this code, but the pattern may be missed when | |||
| 158 | // multiple functions get inlined. | |||
| 159 | return (Size + Value - 1) & ~(Value - 1U); | |||
| 160 | } | |||
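A worked instance of the mask trick above (illustration only), with Size = 5 and Align(8), so Value = 8:

(5 + 8 - 1) & ~(8 - 1)  ==  12 & ~7  ==  8

which matches (5 + 7) / 8 * 8 == 8 without the division.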
| 161 | ||||
| 162 | /// If non-zero \p Skew is specified, the return value will be a minimal integer | |||
| 163 | /// that is greater than or equal to \p Size and equal to \p A * N + \p Skew for | |||
| 164 | /// some integer N. If \p Skew is larger than \p A, its value is adjusted to '\p | |||
| 165 | /// Skew mod \p A'. | |||
| 166 | /// | |||
| 167 | /// Examples: | |||
| 168 | /// \code | |||
| 169 | /// alignTo(5, Align(8), 7) = 7 | |||
| 170 | /// alignTo(17, Align(8), 1) = 17 | |||
| 171 | /// alignTo(~0LL, Align(8), 3) = 3 | |||
| 172 | /// \endcode | |||
| 173 | inline uint64_t alignTo(uint64_t Size, Align A, uint64_t Skew) { | |||
| 174 | const uint64_t Value = A.value(); | |||
| 175 | Skew %= Value; | |||
| 176 | return ((Size + Value - 1 - Skew) & ~(Value - 1U)) + Skew; | |||
| 177 | } | |||
| 178 | ||||
| 179 | /// Returns a multiple of A needed to store `Size` bytes. | |||
| 180 | /// Returns `Size` if current alignment is undefined. | |||
| 181 | inline uint64_t alignTo(uint64_t Size, MaybeAlign A) { | |||
| 182 | return A ? alignTo(Size, A.getValue()) : Size; | |||
| 183 | } | |||
| 184 | ||||
| 185 | /// Aligns `Addr` to `Alignment` bytes, rounding up. | |||
| 186 | inline uintptr_t alignAddr(const void *Addr, Align Alignment) { | |||
| 187 | uintptr_t ArithAddr = reinterpret_cast<uintptr_t>(Addr); | |||
| 188 | assert(static_cast<uintptr_t>(ArithAddr + Alignment.value() - 1) >= | |||
| 189 | ArithAddr && | |||
| 190 | "Overflow"); | |||
| 191 | return alignTo(ArithAddr, Alignment); | |||
| 192 | } | |||
| 193 | ||||
| 194 | /// Returns the offset to the next integer (mod 2**64) that is greater than | |||
| 195 | /// or equal to \p Value and is a multiple of \p Align. | |||
| 196 | inline uint64_t offsetToAlignment(uint64_t Value, Align Alignment) { | |||
| 197 | return alignTo(Value, Alignment) - Value; | |||
| 198 | } | |||
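For example (illustration only): offsetToAlignment(13, Align(8)) is alignTo(13, Align(8)) - 13 == 16 - 13 == 3.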
| 199 | ||||
| 200 | /// Returns the necessary adjustment for aligning `Addr` to `Alignment` | |||
| 201 | /// bytes, rounding up. | |||
| 202 | inline uint64_t offsetToAlignedAddr(const void *Addr, Align Alignment) { | |||
| 203 | return offsetToAlignment(reinterpret_cast<uintptr_t>(Addr), Alignment); | |||
| 204 | } | |||
| 205 | ||||
| 206 | /// Returns the log2 of the alignment. | |||
| 207 | inline unsigned Log2(Align A) { return A.ShiftValue; } | |||
| 208 | ||||
| 209 | /// Returns the alignment that satisfies both alignments. | |||
| 210 | /// Same semantic as MinAlign. | |||
| 211 | inline Align commonAlignment(Align A, Align B) { return std::min(A, B); } | |||
| 212 | ||||
| 213 | /// Returns the alignment that satisfies both alignments. | |||
| 214 | /// Same semantic as MinAlign. | |||
| 215 | inline Align commonAlignment(Align A, uint64_t Offset) { | |||
| 216 | return Align(MinAlign(A.value(), Offset)); | |||
| 217 | } | |||
| 218 | ||||
| 219 | /// Returns the alignment that satisfies both alignments. | |||
| 220 | /// Same semantic as MinAlign. | |||
| 221 | inline MaybeAlign commonAlignment(MaybeAlign A, MaybeAlign B) { | |||
| 222 | return A && B ? commonAlignment(*A, *B) : A ? A : B; | |||
| 223 | } | |||
| 224 | ||||
| 225 | /// Returns the alignment that satisfies both alignments. | |||
| 226 | /// Same semantic as MinAlign. | |||
| 227 | inline MaybeAlign commonAlignment(MaybeAlign A, uint64_t Offset) { | |||
| 228 | return MaybeAlign(MinAlign((*A).value(), Offset)); | |||
| 229 | } | |||
| 230 | ||||
| 231 | /// Returns a representation of the alignment that encodes undefined as 0. | |||
| 232 | inline unsigned encode(MaybeAlign A) { return A ? A->ShiftValue + 1 : 0; } | |||
| 233 | ||||
| 234 | /// Dual operation of the encode function above. | |||
| 235 | inline MaybeAlign decodeMaybeAlign(unsigned Value) { | |||
| 236 | if (Value == 0) | |||
| 237 | return MaybeAlign(); | |||
| 238 | Align Out; | |||
| 239 | Out.ShiftValue = Value - 1; | |||
| 240 | return Out; | |||
| 241 | } | |||
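A round-trip sketch of the encoding scheme (illustration only): undefined maps to 0, and a defined alignment is stored as log2(alignment) + 1, so the inverse is lossless.

encode(MaybeAlign())    == 0                // undefined
encode(MaybeAlign(16))  == 5                // log2(16) + 1
decodeMaybeAlign(5)     == MaybeAlign(16)   // restores ShiftValue 4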
| 242 | ||||
| 243 | /// Returns a representation of the alignment; the encoded value is positive | |||
| 244 | /// by definition. | |||
| 245 | inline unsigned encode(Align A) { return encode(MaybeAlign(A)); } | |||
| 246 | ||||
| 247 | /// Comparisons between Align and scalars. Rhs must be positive. | |||
| 248 | inline bool operator==(Align Lhs, uint64_t Rhs) { | |||
| 249 | ALIGN_CHECK_ISPOSITIVE(Rhs); | |||
| 250 | return Lhs.value() == Rhs; | |||
| 251 | } | |||
| 252 | inline bool operator!=(Align Lhs, uint64_t Rhs) { | |||
| 253 | ALIGN_CHECK_ISPOSITIVE(Rhs); | |||
| 254 | return Lhs.value() != Rhs; | |||
| 255 | } | |||
| 256 | inline bool operator<=(Align Lhs, uint64_t Rhs) { | |||
| 257 | ALIGN_CHECK_ISPOSITIVE(Rhs); | |||
| 258 | return Lhs.value() <= Rhs; | |||
| 259 | } | |||
| 260 | inline bool operator>=(Align Lhs, uint64_t Rhs) { | |||
| 261 | ALIGN_CHECK_ISPOSITIVE(Rhs); | |||
| 262 | return Lhs.value() >= Rhs; | |||
| 263 | } | |||
| 264 | inline bool operator<(Align Lhs, uint64_t Rhs) { | |||
| 265 | ALIGN_CHECK_ISPOSITIVE(Rhs); | |||
| 266 | return Lhs.value() < Rhs; | |||
| 267 | } | |||
| 268 | inline bool operator>(Align Lhs, uint64_t Rhs) { | |||
| 269 | ALIGN_CHECK_ISPOSITIVE(Rhs); | |||
| 270 | return Lhs.value() > Rhs; | |||
| 271 | } | |||
| 272 | ||||
| 273 | /// Comparisons between MaybeAlign and scalars. | |||
| 274 | inline bool operator==(MaybeAlign Lhs, uint64_t Rhs) { | |||
| 275 | return Lhs ? (*Lhs).value() == Rhs : Rhs == 0; | |||
| 276 | } | |||
| 277 | inline bool operator!=(MaybeAlign Lhs, uint64_t Rhs) { | |||
| 278 | return Lhs ? (*Lhs).value() != Rhs : Rhs != 0; | |||
| 279 | } | |||
| 280 | ||||
| 281 | /// Comparison operators between Align. | |||
| 282 | inline bool operator==(Align Lhs, Align Rhs) { | |||
| 283 | return Lhs.ShiftValue == Rhs.ShiftValue; | |||
| 284 | } | |||
| 285 | inline bool operator!=(Align Lhs, Align Rhs) { | |||
| 286 | return Lhs.ShiftValue != Rhs.ShiftValue; | |||
| 287 | } | |||
| 288 | inline bool operator<=(Align Lhs, Align Rhs) { | |||
| 289 | return Lhs.ShiftValue <= Rhs.ShiftValue; | |||
| 290 | } | |||
| 291 | inline bool operator>=(Align Lhs, Align Rhs) { | |||
| 292 | return Lhs.ShiftValue >= Rhs.ShiftValue; | |||
| 293 | } | |||
| 294 | inline bool operator<(Align Lhs, Align Rhs) { | |||
| 295 | return Lhs.ShiftValue < Rhs.ShiftValue; | |||
| 296 | } | |||
| 297 | inline bool operator>(Align Lhs, Align Rhs) { | |||
| 298 | return Lhs.ShiftValue > Rhs.ShiftValue; | |||
| 299 | } | |||
| 300 | ||||
| 301 | // Don't allow relational comparisons with MaybeAlign. | |||
| 302 | bool operator<=(Align Lhs, MaybeAlign Rhs) = delete; | |||
| 303 | bool operator>=(Align Lhs, MaybeAlign Rhs) = delete; | |||
| 304 | bool operator<(Align Lhs, MaybeAlign Rhs) = delete; | |||
| 305 | bool operator>(Align Lhs, MaybeAlign Rhs) = delete; | |||
| 306 | ||||
| 307 | bool operator<=(MaybeAlign Lhs, Align Rhs) = delete; | |||
| 308 | bool operator>=(MaybeAlign Lhs, Align Rhs) = delete; | |||
| 309 | bool operator<(MaybeAlign Lhs, Align Rhs) = delete; | |||
| 310 | bool operator>(MaybeAlign Lhs, Align Rhs) = delete; | |||
| 311 | ||||
| 312 | bool operator<=(MaybeAlign Lhs, MaybeAlign Rhs) = delete; | |||
| 313 | bool operator>=(MaybeAlign Lhs, MaybeAlign Rhs) = delete; | |||
| 314 | bool operator<(MaybeAlign Lhs, MaybeAlign Rhs) = delete; | |||
| 315 | bool operator>(MaybeAlign Lhs, MaybeAlign Rhs) = delete; | |||
| 316 | ||||
| 317 | inline Align operator*(Align Lhs, uint64_t Rhs) { | |||
| 318 | assert(Rhs > 0 && "Rhs must be positive"); | |||
| 319 | return Align(Lhs.value() * Rhs); | |||
| 320 | } | |||
| 321 | ||||
| 322 | inline MaybeAlign operator*(MaybeAlign Lhs, uint64_t Rhs) { | |||
| 323 | assert(Rhs > 0 && "Rhs must be positive"); | |||
| 324 | return Lhs ? Lhs.getValue() * Rhs : MaybeAlign(); | |||
| 325 | } | |||
| 326 | ||||
| 327 | inline Align operator/(Align Lhs, uint64_t Divisor) { | |||
| 328 | assert(llvm::isPowerOf2_64(Divisor) && | |||
| 329 | "Divisor must be positive and a power of 2"); | |||
| 330 | assert(Lhs != 1 && "Can't halve byte alignment"); | |||
| 331 | return Align(Lhs.value() / Divisor); | |||
| 332 | } | |||
| 333 | ||||
| 334 | inline MaybeAlign operator/(MaybeAlign Lhs, uint64_t Divisor) { | |||
| 335 | assert(llvm::isPowerOf2_64(Divisor) && | |||
| 336 | "Divisor must be positive and a power of 2"); | |||
| 337 | return Lhs ? Lhs.getValue() / Divisor : MaybeAlign(); | |||
| 338 | } | |||
| 339 | ||||
| 340 | inline Align max(MaybeAlign Lhs, Align Rhs) { | |||
| 341 | return Lhs && *Lhs > Rhs ? *Lhs : Rhs; | |||
| 342 | } | |||
| 343 | ||||
| 344 | inline Align max(Align Lhs, MaybeAlign Rhs) { | |||
| 345 | return Rhs && *Rhs > Lhs ? *Rhs : Lhs; | |||
| 346 | } | |||
| 347 | ||||
| 348 | #ifndef NDEBUG | |||
| 349 | // For usage in LLVM_DEBUG macros. | |||
| 350 | inline std::string DebugStr(const Align &A) { | |||
| 351 | return std::to_string(A.value()); | |||
| 352 | } | |||
| 353 | // For usage in LLVM_DEBUG macros. | |||
| 354 | inline std::string DebugStr(const MaybeAlign &MA) { | |||
| 355 | if (MA) | |||
| 356 | return std::to_string(MA->value()); | |||
| 357 | return "None"; | |||
| 358 | } | |||
| 359 | #endif // NDEBUG | |||
| 360 | ||||
| 361 | #undef ALIGN_CHECK_ISPOSITIVE | |||
| 362 | ||||
| 363 | } // namespace llvm | |||
| 364 | ||||
| 365 | #endif // LLVM_SUPPORT_ALIGNMENT_H_ |