SHARE
TWEET

Untitled

a guest Aug 8th, 2019 61 Never
Not a member of Pastebin yet? Sign Up, it unlocks many cool features!
  1. Index: lib/Transforms/Utils/SimplifyLibCalls.cpp
  2. ===================================================================
  3. --- lib/Transforms/Utils/SimplifyLibCalls.cpp   (revision 367718)
  4. +++ lib/Transforms/Utils/SimplifyLibCalls.cpp   (working copy)
  5. @@ -1,3159 +1,3178 @@
  6.  //===------ SimplifyLibCalls.cpp - Library calls simplifier ---------------===//
  7.  //
  8.  // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
  9.  // See https://llvm.org/LICENSE.txt for license information.
  10.  // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  11.  //
  12.  //===----------------------------------------------------------------------===//
  13.  //
  14.  // This file implements the library calls simplifier. It does not implement
  15.  // This file implements the library calls simplifier. It does not implement
  16.  // any pass, but can be used by other passes to do simplifications.
  17.  //===----------------------------------------------------------------------===//
  18.  
  19.  #include "llvm/Transforms/Utils/SimplifyLibCalls.h"
  20.  #include "llvm/ADT/APSInt.h"
  21.  #include "llvm/ADT/SmallString.h"
  22.  #include "llvm/ADT/StringMap.h"
  23.  #include "llvm/ADT/Triple.h"
  24.  #include "llvm/Analysis/BlockFrequencyInfo.h"
  25.  #include "llvm/Analysis/ConstantFolding.h"
  26.  #include "llvm/Analysis/OptimizationRemarkEmitter.h"
  27.  #include "llvm/Analysis/ProfileSummaryInfo.h"
  28.  #include "llvm/Analysis/TargetLibraryInfo.h"
  29.  #include "llvm/Transforms/Utils/Local.h"
  30.  #include "llvm/Analysis/ValueTracking.h"
  31.  #include "llvm/Analysis/CaptureTracking.h"
  32.  #include "llvm/Analysis/Loads.h"
  33.  #include "llvm/IR/DataLayout.h"
  34.  #include "llvm/IR/Function.h"
  35.  #include "llvm/IR/IRBuilder.h"
  36.  #include "llvm/IR/IntrinsicInst.h"
  37.  #include "llvm/IR/Intrinsics.h"
  38.  #include "llvm/IR/LLVMContext.h"
  39.  #include "llvm/IR/Module.h"
  40.  #include "llvm/IR/PatternMatch.h"
  41.  #include "llvm/Support/CommandLine.h"
  42.  #include "llvm/Support/KnownBits.h"
  43.  #include "llvm/Transforms/Utils/BuildLibCalls.h"
  44.  #include "llvm/Transforms/Utils/SizeOpts.h"
  45.  
  46.  using namespace llvm;
  47.  using namespace PatternMatch;
  48.  
  49.  static cl::opt<bool>
  50.      EnableUnsafeFPShrink("enable-double-float-shrink", cl::Hidden,
  51.                           cl::init(false),
  52.                           cl::desc("Enable unsafe double to float "
  53.                                    "shrinking for math lib calls"));
  54.  
  55.  
  56.  //===----------------------------------------------------------------------===//
  57.  // Helper Functions
  58.  //===----------------------------------------------------------------------===//
  59.  
  60.  static bool ignoreCallingConv(LibFunc Func) {
  61.    return Func == LibFunc_abs || Func == LibFunc_labs ||
  62.           Func == LibFunc_llabs || Func == LibFunc_strlen;
  63.  }
  64.  
  65.  static bool isCallingConvCCompatible(CallInst *CI) {
  66.    switch(CI->getCallingConv()) {
  67.    default:
  68.      return false;
  69.    case llvm::CallingConv::C:
  70.      return true;
  71.    case llvm::CallingConv::ARM_APCS:
  72.    case llvm::CallingConv::ARM_AAPCS:
  73.    case llvm::CallingConv::ARM_AAPCS_VFP: {
  74.  
  75.      // The iOS ABI diverges from the standard in some cases, so for now don't
  76.      // try to simplify those calls.
  77.      if (Triple(CI->getModule()->getTargetTriple()).isiOS())
  78.        return false;
  79.  
  80.      auto *FuncTy = CI->getFunctionType();
  81.  
  82.      if (!FuncTy->getReturnType()->isPointerTy() &&
  83.          !FuncTy->getReturnType()->isIntegerTy() &&
  84.          !FuncTy->getReturnType()->isVoidTy())
  85.        return false;
  86.  
  87.      for (auto Param : FuncTy->params()) {
  88.        if (!Param->isPointerTy() && !Param->isIntegerTy())
  89.          return false;
  90.      }
  91.      return true;
  92.    }
  93.    }
  94.    return false;
  95.  }
  96.  
  97.  /// Return true if it is only used in equality comparisons with With.
  98.  static bool isOnlyUsedInEqualityComparison(Value *V, Value *With) {
  99.    for (User *U : V->users()) {
  100.      if (ICmpInst *IC = dyn_cast<ICmpInst>(U))
  101.        if (IC->isEquality() && IC->getOperand(1) == With)
  102.          continue;
  103.      // Unknown instruction.
  104.      return false;
  105.    }
  106.    return true;
  107.  }
  108.  
  109.  static bool callHasFloatingPointArgument(const CallInst *CI) {
  110.    return any_of(CI->operands(), [](const Use &OI) {
  111.      return OI->getType()->isFloatingPointTy();
  112.    });
  113.  }
  114.  
  115.  static bool callHasFP128Argument(const CallInst *CI) {
  116.    return any_of(CI->operands(), [](const Use &OI) {
  117.      return OI->getType()->isFP128Ty();
  118.    });
  119.  }
  120.  
  121.  static Value *convertStrToNumber(CallInst *CI, StringRef &Str, int64_t Base) {
  122.    if (Base < 2 || Base > 36)
  123.      // handle special zero base
  124.      if (Base != 0)
  125.        return nullptr;
  126.  
  127.    char *End;
  128.    std::string nptr = Str.str();
  129.    errno = 0;
  130.    long long int Result = strtoll(nptr.c_str(), &End, Base);
  131.    if (errno)
  132.      return nullptr;
  133.  
  134.    // if we assume all possible target locales are ASCII supersets,
  135.    // then if strtoll successfully parses a number on the host,
  136.    // it will also successfully parse the same way on the target
  137.    if (*End != '\0')
  138.      return nullptr;
  139.  
  140.    if (!isIntN(CI->getType()->getPrimitiveSizeInBits(), Result))
  141.      return nullptr;
  142.  
  143.    return ConstantInt::get(CI->getType(), Result);
  144.  }
  145.  
  146.  static bool isLocallyOpenedFile(Value *File, CallInst *CI, IRBuilder<> &B,
  147.                                  const TargetLibraryInfo *TLI) {
  148.    CallInst *FOpen = dyn_cast<CallInst>(File);
  149.    if (!FOpen)
  150.      return false;
  151.  
  152.    Function *InnerCallee = FOpen->getCalledFunction();
  153.    if (!InnerCallee)
  154.      return false;
  155.  
  156.    LibFunc Func;
  157.    if (!TLI->getLibFunc(*InnerCallee, Func) || !TLI->has(Func) ||
  158.        Func != LibFunc_fopen)
  159.      return false;
  160.  
  161.    inferLibFuncAttributes(*CI->getCalledFunction(), *TLI);
  162.    if (PointerMayBeCaptured(File, true, true))
  163.      return false;
  164.  
  165.    return true;
  166.  }
  167.  
  168.  static bool isOnlyUsedInComparisonWithZero(Value *V) {
  169.    for (User *U : V->users()) {
  170.      if (ICmpInst *IC = dyn_cast<ICmpInst>(U))
  171.        if (Constant *C = dyn_cast<Constant>(IC->getOperand(1)))
  172.          if (C->isNullValue())
  173.            continue;
  174.      // Unknown instruction.
  175.      return false;
  176.    }
  177.    return true;
  178.  }
  179.  
  180.  static bool canTransformToMemCmp(CallInst *CI, Value *Str, uint64_t Len,
  181.                                   const DataLayout &DL) {
  182.    if (!isOnlyUsedInComparisonWithZero(CI))
  183.      return false;
  184.  
  185.    if (!isDereferenceableAndAlignedPointer(Str, 1, APInt(64, Len), DL))
  186.      return false;
  187.  
  188.    if (CI->getFunction()->hasFnAttribute(Attribute::SanitizeMemory))
  189.      return false;
  190.  
  191.    return true;
  192.  }
  193.  
  194.  //===----------------------------------------------------------------------===//
  195.  // String and Memory Library Call Optimizations
  196.  //===----------------------------------------------------------------------===//
  197.  
  198.  Value *LibCallSimplifier::optimizeStrCat(CallInst *CI, IRBuilder<> &B) {
  199.    // Extract some information from the instruction
  200.    Value *Dst = CI->getArgOperand(0);
  201.    Value *Src = CI->getArgOperand(1);
  202.  
  203.    // See if we can get the length of the input string.
  204.    uint64_t Len = GetStringLength(Src);
  205.    if (Len == 0)
  206.      return nullptr;
  207.    --Len; // Unbias length.
  208.  
  209.    // Handle the simple, do-nothing case: strcat(x, "") -> x
  210.    if (Len == 0)
  211.      return Dst;
  212.  
  213.    return emitStrLenMemCpy(Src, Dst, Len, B);
  214.  }
  215.  
  216.  Value *LibCallSimplifier::emitStrLenMemCpy(Value *Src, Value *Dst, uint64_t Len,
  217.                                             IRBuilder<> &B) {
  218.    // We need to find the end of the destination string.  That's where the
  219.    // memory is to be moved to. We just generate a call to strlen.
  220.    Value *DstLen = emitStrLen(Dst, B, DL, TLI);
  221.    if (!DstLen)
  222.      return nullptr;
  223.  
  224.    // Now that we have the destination's length, we must index into the
  225.    // destination's pointer to get the actual memcpy destination (end of
  226.    // the string .. we're concatenating).
  227.    Value *CpyDst = B.CreateGEP(B.getInt8Ty(), Dst, DstLen, "endptr");
  228.  
  229.    // We have enough information to now generate the memcpy call to do the
  230.    // concatenation for us.  Make a memcpy to copy the nul byte with align = 1.
  231.    B.CreateMemCpy(CpyDst, 1, Src, 1,
  232.                   ConstantInt::get(DL.getIntPtrType(Src->getContext()), Len + 1));
  233.    return Dst;
  234.  }
  235.  
  236.  Value *LibCallSimplifier::optimizeStrNCat(CallInst *CI, IRBuilder<> &B) {
  237.    // Extract some information from the instruction.
  238.    Value *Dst = CI->getArgOperand(0);
  239.    Value *Src = CI->getArgOperand(1);
  240.    uint64_t Len;
  241.  
  242.    // We don't do anything if length is not constant.
  243.    if (ConstantInt *LengthArg = dyn_cast<ConstantInt>(CI->getArgOperand(2)))
  244.      Len = LengthArg->getZExtValue();
  245.    else
  246.      return nullptr;
  247.  
  248.    // See if we can get the length of the input string.
  249.    uint64_t SrcLen = GetStringLength(Src);
  250.    if (SrcLen == 0)
  251.      return nullptr;
  252.    --SrcLen; // Unbias length.
  253.  
  254.    // Handle the simple, do-nothing cases:
  255.    // strncat(x, "", c) -> x
  256.    // strncat(x,  c, 0) -> x
  257.    if (SrcLen == 0 || Len == 0)
  258.      return Dst;
  259.  
  260.    // We don't optimize this case.
  261.    if (Len < SrcLen)
  262.      return nullptr;
  263.  
  264.    // strncat(x, s, c) -> strcat(x, s)
  265.    // s is constant so the strcat can be optimized further.
  266.    return emitStrLenMemCpy(Src, Dst, SrcLen, B);
  267.  }
  268.  
  269.  Value *LibCallSimplifier::optimizeStrChr(CallInst *CI, IRBuilder<> &B) {
  270.    Function *Callee = CI->getCalledFunction();
  271.    FunctionType *FT = Callee->getFunctionType();
  272.    Value *SrcStr = CI->getArgOperand(0);
  273.  
  274.    // If the second operand is non-constant, see if we can compute the length
  275.    // of the input string and turn this into memchr.
  276.    ConstantInt *CharC = dyn_cast<ConstantInt>(CI->getArgOperand(1));
  277.    if (!CharC) {
  278.      uint64_t Len = GetStringLength(SrcStr);
  279.      if (Len == 0 || !FT->getParamType(1)->isIntegerTy(32)) // memchr needs i32.
  280.        return nullptr;
  281.  
  282.      return emitMemChr(SrcStr, CI->getArgOperand(1), // include nul.
  283.                        ConstantInt::get(DL.getIntPtrType(CI->getContext()), Len),
  284.                        B, DL, TLI);
  285.    }
  286.  
  287.    // Otherwise, the character is a constant, see if the first argument is
  288.    // a string literal.  If so, we can constant fold.
  289.    StringRef Str;
  290.    if (!getConstantStringInfo(SrcStr, Str)) {
  291.      if (CharC->isZero()) // strchr(p, 0) -> p + strlen(p)
  292.        return B.CreateGEP(B.getInt8Ty(), SrcStr, emitStrLen(SrcStr, B, DL, TLI),
  293.                           "strchr");
  294.      return nullptr;
  295.    }
  296.  
  297.    // Compute the offset, make sure to handle the case when we're searching for
  298.    // zero (a weird way to spell strlen).
  299.    size_t I = (0xFF & CharC->getSExtValue()) == 0
  300.                   ? Str.size()
  301.                   : Str.find(CharC->getSExtValue());
  302.    if (I == StringRef::npos) // Didn't find the char.  strchr returns null.
  303.      return Constant::getNullValue(CI->getType());
  304.  
  305.    // strchr(s+n,c)  -> gep(s+n+i,c)
  306.    return B.CreateGEP(B.getInt8Ty(), SrcStr, B.getInt64(I), "strchr");
  307.  }
  308.  
  309.  Value *LibCallSimplifier::optimizeStrRChr(CallInst *CI, IRBuilder<> &B) {
  310.    Value *SrcStr = CI->getArgOperand(0);
  311.    ConstantInt *CharC = dyn_cast<ConstantInt>(CI->getArgOperand(1));
  312.  
  313.    // Cannot fold anything if we're not looking for a constant.
  314.    if (!CharC)
  315.      return nullptr;
  316.  
  317.    StringRef Str;
  318.    if (!getConstantStringInfo(SrcStr, Str)) {
  319.      // strrchr(s, 0) -> strchr(s, 0)
  320.      if (CharC->isZero())
  321.        return emitStrChr(SrcStr, '\0', B, TLI);
  322.      return nullptr;
  323.    }
  324.  
  325.    // Compute the offset.
  326.    size_t I = (0xFF & CharC->getSExtValue()) == 0
  327.                   ? Str.size()
  328.                   : Str.rfind(CharC->getSExtValue());
  329.    if (I == StringRef::npos) // Didn't find the char. Return null.
  330.      return Constant::getNullValue(CI->getType());
  331.  
  332.    // strrchr(s+n,c) -> gep(s+n+i,c)
  333.    return B.CreateGEP(B.getInt8Ty(), SrcStr, B.getInt64(I), "strrchr");
  334.  }
  335.  
  336.  Value *LibCallSimplifier::optimizeStrCmp(CallInst *CI, IRBuilder<> &B) {
  337.    Value *Str1P = CI->getArgOperand(0), *Str2P = CI->getArgOperand(1);
  338.    if (Str1P == Str2P) // strcmp(x,x)  -> 0
  339.      return ConstantInt::get(CI->getType(), 0);
  340.  
  341.    StringRef Str1, Str2;
  342.    bool HasStr1 = getConstantStringInfo(Str1P, Str1);
  343.    bool HasStr2 = getConstantStringInfo(Str2P, Str2);
  344.  
  345.    // strcmp(x, y)  -> cnst  (if both x and y are constant strings)
  346.    if (HasStr1 && HasStr2)
  347.      return ConstantInt::get(CI->getType(), Str1.compare(Str2));
  348.  
  349.    if (HasStr1 && Str1.empty()) // strcmp("", x) -> -*x
  350.      return B.CreateNeg(B.CreateZExt(
  351.          B.CreateLoad(B.getInt8Ty(), Str2P, "strcmpload"), CI->getType()));
  352.  
  353.    if (HasStr2 && Str2.empty()) // strcmp(x,"") -> *x
  354.      return B.CreateZExt(B.CreateLoad(B.getInt8Ty(), Str1P, "strcmpload"),
  355.                          CI->getType());
  356.  
  357.    // strcmp(P, "x") -> memcmp(P, "x", 2)
  358.    uint64_t Len1 = GetStringLength(Str1P);
  359.    uint64_t Len2 = GetStringLength(Str2P);
  360.    if (Len1 && Len2) {
  361.      return emitMemCmp(Str1P, Str2P,
  362.                        ConstantInt::get(DL.getIntPtrType(CI->getContext()),
  363.                                         std::min(Len1, Len2)),
  364.                        B, DL, TLI);
  365.    }
  366.  
  367.    // strcmp to memcmp
  368.    if (!HasStr1 && HasStr2) {
  369.      if (canTransformToMemCmp(CI, Str1P, Len2, DL))
  370.        return emitMemCmp(
  371.            Str1P, Str2P,
  372.            ConstantInt::get(DL.getIntPtrType(CI->getContext()), Len2), B, DL,
  373.            TLI);
  374.    } else if (HasStr1 && !HasStr2) {
  375.      if (canTransformToMemCmp(CI, Str2P, Len1, DL))
  376.        return emitMemCmp(
  377.            Str1P, Str2P,
  378.            ConstantInt::get(DL.getIntPtrType(CI->getContext()), Len1), B, DL,
  379.            TLI);
  380.    }
  381.  
  382.    return nullptr;
  383.  }
  384.  
  385.  Value *LibCallSimplifier::optimizeStrNCmp(CallInst *CI, IRBuilder<> &B) {
  386.    Value *Str1P = CI->getArgOperand(0), *Str2P = CI->getArgOperand(1);
  387.    if (Str1P == Str2P) // strncmp(x,x,n)  -> 0
  388.      return ConstantInt::get(CI->getType(), 0);
  389.  
  390.    // Get the length argument if it is constant.
  391.    uint64_t Length;
  392.    if (ConstantInt *LengthArg = dyn_cast<ConstantInt>(CI->getArgOperand(2)))
  393.      Length = LengthArg->getZExtValue();
  394.    else
  395.      return nullptr;
  396.  
  397.    if (Length == 0) // strncmp(x,y,0)   -> 0
  398.      return ConstantInt::get(CI->getType(), 0);
  399.  
  400.    if (Length == 1) // strncmp(x,y,1) -> memcmp(x,y,1)
  401.      return emitMemCmp(Str1P, Str2P, CI->getArgOperand(2), B, DL, TLI);
  402.  
  403.    StringRef Str1, Str2;
  404.    bool HasStr1 = getConstantStringInfo(Str1P, Str1);
  405.    bool HasStr2 = getConstantStringInfo(Str2P, Str2);
  406.  
  407.    // strncmp(x, y)  -> cnst  (if both x and y are constant strings)
  408.    if (HasStr1 && HasStr2) {
  409.      StringRef SubStr1 = Str1.substr(0, Length);
  410.      StringRef SubStr2 = Str2.substr(0, Length);
  411.      return ConstantInt::get(CI->getType(), SubStr1.compare(SubStr2));
  412.    }
  413.  
  414.    if (HasStr1 && Str1.empty()) // strncmp("", x, n) -> -*x
  415.      return B.CreateNeg(B.CreateZExt(
  416.          B.CreateLoad(B.getInt8Ty(), Str2P, "strcmpload"), CI->getType()));
  417.  
  418.    if (HasStr2 && Str2.empty()) // strncmp(x, "", n) -> *x
  419.      return B.CreateZExt(B.CreateLoad(B.getInt8Ty(), Str1P, "strcmpload"),
  420.                          CI->getType());
  421.  
  422.    uint64_t Len1 = GetStringLength(Str1P);
  423.    uint64_t Len2 = GetStringLength(Str2P);
  424.  
  425.    // strncmp to memcmp
  426.    if (!HasStr1 && HasStr2) {
  427.      Len2 = std::min(Len2, Length);
  428.      if (canTransformToMemCmp(CI, Str1P, Len2, DL))
  429.        return emitMemCmp(
  430.            Str1P, Str2P,
  431.            ConstantInt::get(DL.getIntPtrType(CI->getContext()), Len2), B, DL,
  432.            TLI);
  433.    } else if (HasStr1 && !HasStr2) {
  434.      Len1 = std::min(Len1, Length);
  435.      if (canTransformToMemCmp(CI, Str2P, Len1, DL))
  436.        return emitMemCmp(
  437.            Str1P, Str2P,
  438.            ConstantInt::get(DL.getIntPtrType(CI->getContext()), Len1), B, DL,
  439.            TLI);
  440.    }
  441.  
  442.    return nullptr;
  443.  }
  444.  
  445.  Value *LibCallSimplifier::optimizeStrCpy(CallInst *CI, IRBuilder<> &B) {
  446.    Value *Dst = CI->getArgOperand(0), *Src = CI->getArgOperand(1);
  447.    if (Dst == Src) // strcpy(x,x)  -> x
  448.      return Src;
  449.  
  450.    // See if we can get the length of the input string.
  451.    uint64_t Len = GetStringLength(Src);
  452.    if (Len == 0)
  453.      return nullptr;
  454.  
  455.    // We have enough information to now generate the memcpy call to do the
  456.    // copy for us.  Make a memcpy to copy the nul byte with align = 1.
  457.    B.CreateMemCpy(Dst, 1, Src, 1,
  458.                   ConstantInt::get(DL.getIntPtrType(CI->getContext()), Len));
  459.    return Dst;
  460.  }
  461.  
  462.  Value *LibCallSimplifier::optimizeStpCpy(CallInst *CI, IRBuilder<> &B) {
  463.    Function *Callee = CI->getCalledFunction();
  464.    Value *Dst = CI->getArgOperand(0), *Src = CI->getArgOperand(1);
  465.    if (Dst == Src) { // stpcpy(x,x)  -> x+strlen(x)
  466.      Value *StrLen = emitStrLen(Src, B, DL, TLI);
  467.      return StrLen ? B.CreateInBoundsGEP(B.getInt8Ty(), Dst, StrLen) : nullptr;
  468.    }
  469.  
  470.    // See if we can get the length of the input string.
  471.    uint64_t Len = GetStringLength(Src);
  472.    if (Len == 0)
  473.      return nullptr;
  474.  
  475.    Type *PT = Callee->getFunctionType()->getParamType(0);
  476.    Value *LenV = ConstantInt::get(DL.getIntPtrType(PT), Len);
  477.    Value *DstEnd = B.CreateGEP(B.getInt8Ty(), Dst,
  478.                                ConstantInt::get(DL.getIntPtrType(PT), Len - 1));
  479.  
  480.    // We have enough information to now generate the memcpy call to do the
  481.    // copy for us.  Make a memcpy to copy the nul byte with align = 1.
  482.    B.CreateMemCpy(Dst, 1, Src, 1, LenV);
  483.    return DstEnd;
  484.  }
  485.  
  486.  Value *LibCallSimplifier::optimizeStrNCpy(CallInst *CI, IRBuilder<> &B) {
  487.    Function *Callee = CI->getCalledFunction();
  488.    Value *Dst = CI->getArgOperand(0);
  489.    Value *Src = CI->getArgOperand(1);
  490.    Value *LenOp = CI->getArgOperand(2);
  491.  
  492.    // See if we can get the length of the input string.
  493.    uint64_t SrcLen = GetStringLength(Src);
  494.    if (SrcLen == 0)
  495.      return nullptr;
  496.    --SrcLen;
  497.  
  498.    if (SrcLen == 0) {
  499.      // strncpy(x, "", y) -> memset(align 1 x, '\0', y)
  500.      B.CreateMemSet(Dst, B.getInt8('\0'), LenOp, 1);
  501.      return Dst;
  502.    }
  503.  
  504.    uint64_t Len;
  505.    if (ConstantInt *LengthArg = dyn_cast<ConstantInt>(LenOp))
  506.      Len = LengthArg->getZExtValue();
  507.    else
  508.      return nullptr;
  509.  
  510.    if (Len == 0)
  511.      return Dst; // strncpy(x, y, 0) -> x
  512.  
  513.    // Let strncpy handle the zero padding
  514.    if (Len > SrcLen + 1)
  515.      return nullptr;
  516.  
  517.    Type *PT = Callee->getFunctionType()->getParamType(0);
  518.    // strncpy(x, s, c) -> memcpy(align 1 x, align 1 s, c) [s and c are constant]
  519.    B.CreateMemCpy(Dst, 1, Src, 1, ConstantInt::get(DL.getIntPtrType(PT), Len));
  520.  
  521.    return Dst;
  522.  }
  523.  
  524.  Value *LibCallSimplifier::optimizeStringLength(CallInst *CI, IRBuilder<> &B,
  525.                                                 unsigned CharSize) {
  526.    Value *Src = CI->getArgOperand(0);
  527.  
  528.    // Constant folding: strlen("xyz") -> 3
  529.    if (uint64_t Len = GetStringLength(Src, CharSize))
  530.      return ConstantInt::get(CI->getType(), Len - 1);
  531.  
  532.    // If s is a constant pointer pointing to a string literal, we can fold
  533.    // strlen(s + x) to strlen(s) - x, when x is known to be in the range
  534.    // [0, strlen(s)] or the string has a single null terminator '\0' at the end.
  535.    // We only try to simplify strlen when the pointer s points to an array
  536.    // of i8. Otherwise, we would need to scale the offset x before doing the
  537.    // subtraction. This will make the optimization more complex, and it's not
  538.    // very useful because calling strlen for a pointer of other types is
  539.    // very uncommon.
  540.    if (GEPOperator *GEP = dyn_cast<GEPOperator>(Src)) {
  541.      if (!isGEPBasedOnPointerToString(GEP, CharSize))
  542.        return nullptr;
  543.  
  544.      ConstantDataArraySlice Slice;
  545.      if (getConstantDataArrayInfo(GEP->getOperand(0), Slice, CharSize)) {
  546.        uint64_t NullTermIdx;
  547.        if (Slice.Array == nullptr) {
  548.          NullTermIdx = 0;
  549.        } else {
  550.          NullTermIdx = ~((uint64_t)0);
  551.          for (uint64_t I = 0, E = Slice.Length; I < E; ++I) {
  552.            if (Slice.Array->getElementAsInteger(I + Slice.Offset) == 0) {
  553.              NullTermIdx = I;
  554.              break;
  555.            }
  556.          }
  557.          // If the string does not have '\0', leave it to strlen to compute
  558.          // its length.
  559.          if (NullTermIdx == ~((uint64_t)0))
  560.            return nullptr;
  561.        }
  562.  
  563.        Value *Offset = GEP->getOperand(2);
  564.        KnownBits Known = computeKnownBits(Offset, DL, 0, nullptr, CI, nullptr);
  565.        Known.Zero.flipAllBits();
  566.        uint64_t ArrSize =
  567.               cast<ArrayType>(GEP->getSourceElementType())->getNumElements();
  568.  
  569.        // KnownZero's bits are flipped, so zeros in KnownZero now represent
  570.        // bits known to be zeros in Offset, and ones in KnowZero represent
  571.        // bits unknown in Offset. Therefore, Offset is known to be in range
  572.        // [0, NullTermIdx] when the flipped KnownZero is non-negative and
  573.        // unsigned-less-than NullTermIdx.
  574.        //
  575.        // If Offset is not provably in the range [0, NullTermIdx], we can still
  576.        // optimize if we can prove that the program has undefined behavior when
  577.        // Offset is outside that range. That is the case when GEP->getOperand(0)
  578.        // is a pointer to an object whose memory extent is NullTermIdx+1.
  579.        if ((Known.Zero.isNonNegative() && Known.Zero.ule(NullTermIdx)) ||
  580.            (GEP->isInBounds() && isa<GlobalVariable>(GEP->getOperand(0)) &&
  581.             NullTermIdx == ArrSize - 1)) {
  582.          Offset = B.CreateSExtOrTrunc(Offset, CI->getType());
  583.          return B.CreateSub(ConstantInt::get(CI->getType(), NullTermIdx),
  584.                             Offset);
  585.        }
  586.      }
  587.  
  588.      return nullptr;
  589.    }
  590.  
  591.    // strlen(x?"foo":"bars") --> x ? 3 : 4
  592.    if (SelectInst *SI = dyn_cast<SelectInst>(Src)) {
  593.      uint64_t LenTrue = GetStringLength(SI->getTrueValue(), CharSize);
  594.      uint64_t LenFalse = GetStringLength(SI->getFalseValue(), CharSize);
  595.      if (LenTrue && LenFalse) {
  596.        ORE.emit([&]() {
  597.          return OptimizationRemark("instcombine", "simplify-libcalls", CI)
  598.                 << "folded strlen(select) to select of constants";
  599.        });
  600.        return B.CreateSelect(SI->getCondition(),
  601.                              ConstantInt::get(CI->getType(), LenTrue - 1),
  602.                              ConstantInt::get(CI->getType(), LenFalse - 1));
  603.      }
  604.    }
  605.  
  606.    // strlen(x) != 0 --> *x != 0
  607.    // strlen(x) == 0 --> *x == 0
  608.    if (isOnlyUsedInZeroEqualityComparison(CI))
  609.      return B.CreateZExt(B.CreateLoad(B.getIntNTy(CharSize), Src, "strlenfirst"),
  610.                          CI->getType());
  611.  
  612.    return nullptr;
  613.  }
  614.  
  615.  Value *LibCallSimplifier::optimizeStrLen(CallInst *CI, IRBuilder<> &B) {
  616.    return optimizeStringLength(CI, B, 8);
  617.  }
  618.  
  619.  Value *LibCallSimplifier::optimizeWcslen(CallInst *CI, IRBuilder<> &B) {
  620.    Module &M = *CI->getModule();
  621.    unsigned WCharSize = TLI->getWCharSize(M) * 8;
  622.    // We cannot perform this optimization without wchar_size metadata.
  623.    if (WCharSize == 0)
  624.      return nullptr;
  625.  
  626.    return optimizeStringLength(CI, B, WCharSize);
  627.  }
  628.  
  629.  Value *LibCallSimplifier::optimizeStrPBrk(CallInst *CI, IRBuilder<> &B) {
  630.    StringRef S1, S2;
  631.    bool HasS1 = getConstantStringInfo(CI->getArgOperand(0), S1);
  632.    bool HasS2 = getConstantStringInfo(CI->getArgOperand(1), S2);
  633.  
  634.    // strpbrk(s, "") -> nullptr
  635.    // strpbrk("", s) -> nullptr
  636.    if ((HasS1 && S1.empty()) || (HasS2 && S2.empty()))
  637.      return Constant::getNullValue(CI->getType());
  638.  
  639.    // Constant folding.
  640.    if (HasS1 && HasS2) {
  641.      size_t I = S1.find_first_of(S2);
  642.      if (I == StringRef::npos) // No match.
  643.        return Constant::getNullValue(CI->getType());
  644.  
  645.      return B.CreateGEP(B.getInt8Ty(), CI->getArgOperand(0), B.getInt64(I),
  646.                         "strpbrk");
  647.    }
  648.  
  649.    // strpbrk(s, "a") -> strchr(s, 'a')
  650.    if (HasS2 && S2.size() == 1)
  651.      return emitStrChr(CI->getArgOperand(0), S2[0], B, TLI);
  652.  
  653.    return nullptr;
  654.  }
  655.  
  656.  Value *LibCallSimplifier::optimizeStrTo(CallInst *CI, IRBuilder<> &B) {
  657.    Value *EndPtr = CI->getArgOperand(1);
  658.    if (isa<ConstantPointerNull>(EndPtr)) {
  659.      // With a null EndPtr, this function won't capture the main argument.
  660.      // It would be readonly too, except that it still may write to errno.
  661.      CI->addParamAttr(0, Attribute::NoCapture);
  662.    }
  663.  
  664.    return nullptr;
  665.  }
  666.  
  667.  Value *LibCallSimplifier::optimizeStrSpn(CallInst *CI, IRBuilder<> &B) {
  668.    StringRef S1, S2;
  669.    bool HasS1 = getConstantStringInfo(CI->getArgOperand(0), S1);
  670.    bool HasS2 = getConstantStringInfo(CI->getArgOperand(1), S2);
  671.  
  672.    // strspn(s, "") -> 0
  673.    // strspn("", s) -> 0
  674.    if ((HasS1 && S1.empty()) || (HasS2 && S2.empty()))
  675.      return Constant::getNullValue(CI->getType());
  676.  
  677.    // Constant folding.
  678.    if (HasS1 && HasS2) {
  679.      size_t Pos = S1.find_first_not_of(S2);
  680.      if (Pos == StringRef::npos)
  681.        Pos = S1.size();
  682.      return ConstantInt::get(CI->getType(), Pos);
  683.    }
  684.  
  685.    return nullptr;
  686.  }
  687.  
  688.  Value *LibCallSimplifier::optimizeStrCSpn(CallInst *CI, IRBuilder<> &B) {
  689.    StringRef S1, S2;
  690.    bool HasS1 = getConstantStringInfo(CI->getArgOperand(0), S1);
  691.    bool HasS2 = getConstantStringInfo(CI->getArgOperand(1), S2);
  692.  
  693.    // strcspn("", s) -> 0
  694.    if (HasS1 && S1.empty())
  695.      return Constant::getNullValue(CI->getType());
  696.  
  697.    // Constant folding.
  698.    if (HasS1 && HasS2) {
  699.      size_t Pos = S1.find_first_of(S2);
  700.      if (Pos == StringRef::npos)
  701.        Pos = S1.size();
  702.      return ConstantInt::get(CI->getType(), Pos);
  703.    }
  704.  
  705.    // strcspn(s, "") -> strlen(s)
  706.    if (HasS2 && S2.empty())
  707.      return emitStrLen(CI->getArgOperand(0), B, DL, TLI);
  708.  
  709.    return nullptr;
  710.  }
  711.  
  712.  Value *LibCallSimplifier::optimizeStrStr(CallInst *CI, IRBuilder<> &B) {
  713.    // fold strstr(x, x) -> x.
  714.    if (CI->getArgOperand(0) == CI->getArgOperand(1))
  715.      return B.CreateBitCast(CI->getArgOperand(0), CI->getType());
  716.  
  717.    // fold strstr(a, b) == a -> strncmp(a, b, strlen(b)) == 0
  718.    if (isOnlyUsedInEqualityComparison(CI, CI->getArgOperand(0))) {
  719.      Value *StrLen = emitStrLen(CI->getArgOperand(1), B, DL, TLI);
  720.      if (!StrLen)
  721.        return nullptr;
  722.      Value *StrNCmp = emitStrNCmp(CI->getArgOperand(0), CI->getArgOperand(1),
  723.                                   StrLen, B, DL, TLI);
  724.      if (!StrNCmp)
  725.        return nullptr;
  726.      for (auto UI = CI->user_begin(), UE = CI->user_end(); UI != UE;) {
  727.        ICmpInst *Old = cast<ICmpInst>(*UI++);
  728.        Value *Cmp =
  729.            B.CreateICmp(Old->getPredicate(), StrNCmp,
  730.                         ConstantInt::getNullValue(StrNCmp->getType()), "cmp");
  731.        replaceAllUsesWith(Old, Cmp);
  732.      }
  733.      return CI;
  734.    }
  735.  
  736.    // See if either input string is a constant string.
  737.    StringRef SearchStr, ToFindStr;
  738.    bool HasStr1 = getConstantStringInfo(CI->getArgOperand(0), SearchStr);
  739.    bool HasStr2 = getConstantStringInfo(CI->getArgOperand(1), ToFindStr);
  740.  
  741.    // fold strstr(x, "") -> x.
  742.    if (HasStr2 && ToFindStr.empty())
  743.      return B.CreateBitCast(CI->getArgOperand(0), CI->getType());
  744.  
  745.    // If both strings are known, constant fold it.
  746.    if (HasStr1 && HasStr2) {
  747.      size_t Offset = SearchStr.find(ToFindStr);
  748.  
  749.      if (Offset == StringRef::npos) // strstr("foo", "bar") -> null
  750.        return Constant::getNullValue(CI->getType());
  751.  
  752.      // strstr("abcd", "bc") -> gep((char*)"abcd", 1)
  753.      Value *Result = castToCStr(CI->getArgOperand(0), B);
  754.      Result =
  755.          B.CreateConstInBoundsGEP1_64(B.getInt8Ty(), Result, Offset, "strstr");
  756.      return B.CreateBitCast(Result, CI->getType());
  757.    }
  758.  
  759.    // fold strstr(x, "y") -> strchr(x, 'y').
  760.    if (HasStr2 && ToFindStr.size() == 1) {
  761.      Value *StrChr = emitStrChr(CI->getArgOperand(0), ToFindStr[0], B, TLI);
  762.      return StrChr ? B.CreateBitCast(StrChr, CI->getType()) : nullptr;
  763.    }
  764.    return nullptr;
  765.  }
  766.  
/// Simplify memchr(Src, C, Len): constant-fold when the string and length are
/// known, or turn the scan into a single bit-field membership test when only
/// the searched character varies and the result is only null-compared.
Value *LibCallSimplifier::optimizeMemChr(CallInst *CI, IRBuilder<> &B) {
  Value *SrcStr = CI->getArgOperand(0);
  ConstantInt *CharC = dyn_cast<ConstantInt>(CI->getArgOperand(1));
  ConstantInt *LenC = dyn_cast<ConstantInt>(CI->getArgOperand(2));

  // memchr(x, y, 0) -> null
  if (LenC && LenC->isZero())
    return Constant::getNullValue(CI->getType());

  // From now on we need at least constant length and string.
  StringRef Str;
  if (!LenC || !getConstantStringInfo(SrcStr, Str, 0, /*TrimAtNul=*/false))
    return nullptr;

  // Truncate the string to LenC. If Str is smaller than LenC we will still only
  // scan the string, as reading past the end of it is undefined and we can just
  // return null if we don't find the char.
  Str = Str.substr(0, LenC->getZExtValue());

  // If the char is variable but the input str and length are not we can turn
  // this memchr call into a simple bit field test. Of course this only works
  // when the return value is only checked against null.
  //
  // It would be really nice to reuse switch lowering here but we can't change
  // the CFG at this point.
  //
  // memchr("\r\n", C, 2) != nullptr -> (1 << C & ((1 << '\r') | (1 << '\n')))
  // != 0
  //   after bounds check.
  if (!CharC && !Str.empty() && isOnlyUsedInZeroEqualityComparison(CI)) {
    // The largest byte value present decides how wide the bit field must be.
    unsigned char Max =
        *std::max_element(reinterpret_cast<const unsigned char *>(Str.begin()),
                          reinterpret_cast<const unsigned char *>(Str.end()));

    // Make sure the bit field we're about to create fits in a register on the
    // target.
    // FIXME: On a 64 bit architecture this prevents us from using the
    // interesting range of alpha ascii chars. We could do better by emitting
    // two bitfields or shifting the range by 64 if no lower chars are used.
    if (!DL.fitsInLegalInteger(Max + 1))
      return nullptr;

    // For the bit field use a power-of-2 type with at least 8 bits to avoid
    // creating unnecessary illegal types.
    unsigned char Width = NextPowerOf2(std::max((unsigned char)7, Max));

    // Now build the bit field: one set bit per byte value occurring in Str.
    APInt Bitfield(Width, 0);
    for (char C : Str)
      Bitfield.setBit((unsigned char)C);
    Value *BitfieldC = B.getInt(Bitfield);

    // Adjust width of "C" to the bitfield width, then mask off the high bits.
    Value *C = B.CreateZExtOrTrunc(CI->getArgOperand(1), BitfieldC->getType());
    C = B.CreateAnd(C, B.getIntN(Width, 0xFF));

    // First check that the bit field access is within bounds.
    Value *Bounds = B.CreateICmp(ICmpInst::ICMP_ULT, C, B.getIntN(Width, Width),
                                 "memchr.bounds");

    // Create code that checks if the given bit is set in the field.
    Value *Shl = B.CreateShl(B.getIntN(Width, 1ULL), C);
    Value *Bits = B.CreateIsNotNull(B.CreateAnd(Shl, BitfieldC), "memchr.bits");

    // Finally merge both checks and cast to pointer type. The inttoptr
    // implicitly zexts the i1 to intptr type.
    return B.CreateIntToPtr(B.CreateAnd(Bounds, Bits, "memchr"), CI->getType());
  }

  // Check if all arguments are constants.  If so, we can constant fold.
  if (!CharC)
    return nullptr;

  // Compute the offset. memchr compares as unsigned char, so mask the search
  // value down to a single byte before looking it up.
  size_t I = Str.find(CharC->getSExtValue() & 0xFF);
  if (I == StringRef::npos) // Didn't find the char.  memchr returns null.
    return Constant::getNullValue(CI->getType());

  // memchr(s+n,c,l) -> gep(s+n+i,c)
  return B.CreateGEP(B.getInt8Ty(), SrcStr, B.getInt64(I), "memchr");
}
  848.  
/// Simplify memcmp(LHS, RHS, Len) for a known constant Len: fold to 0 for
/// Len == 0, to a single byte subtraction for Len == 1, to one wide integer
/// compare when the result is only equality-tested, or to a constant when
/// both buffers are constant strings.
static Value *optimizeMemCmpConstantSize(CallInst *CI, Value *LHS, Value *RHS,
                                         uint64_t Len, IRBuilder<> &B,
                                         const DataLayout &DL) {
  if (Len == 0) // memcmp(s1,s2,0) -> 0
    return Constant::getNullValue(CI->getType());

  // memcmp(S1,S2,1) -> *(unsigned char*)LHS - *(unsigned char*)RHS
  if (Len == 1) {
    // Zero-extend both bytes so the subtraction matches memcmp's
    // unsigned-char comparison semantics.
    Value *LHSV =
        B.CreateZExt(B.CreateLoad(B.getInt8Ty(), castToCStr(LHS, B), "lhsc"),
                     CI->getType(), "lhsv");
    Value *RHSV =
        B.CreateZExt(B.CreateLoad(B.getInt8Ty(), castToCStr(RHS, B), "rhsc"),
                     CI->getType(), "rhsv");
    return B.CreateSub(LHSV, RHSV, "chardiff");
  }

  // memcmp(S1,S2,N/8)==0 -> (*(intN_t*)S1 != *(intN_t*)S2)==0
  // TODO: The case where both inputs are constants does not need to be limited
  // to legal integers or equality comparison. See block below this.
  if (DL.isLegalInteger(Len * 8) && isOnlyUsedInZeroEqualityComparison(CI)) {
    IntegerType *IntType = IntegerType::get(CI->getContext(), Len * 8);
    unsigned PrefAlignment = DL.getPrefTypeAlignment(IntType);

    // First, see if we can fold either argument to a constant.
    Value *LHSV = nullptr;
    if (auto *LHSC = dyn_cast<Constant>(LHS)) {
      LHSC = ConstantExpr::getBitCast(LHSC, IntType->getPointerTo());
      LHSV = ConstantFoldLoadFromConstPtr(LHSC, IntType, DL);
    }
    Value *RHSV = nullptr;
    if (auto *RHSC = dyn_cast<Constant>(RHS)) {
      RHSC = ConstantExpr::getBitCast(RHSC, IntType->getPointerTo());
      RHSV = ConstantFoldLoadFromConstPtr(RHSC, IntType, DL);
    }

    // Don't generate unaligned loads. If either source is constant data,
    // alignment doesn't matter for that source because there is no load.
    if ((LHSV || getKnownAlignment(LHS, DL, CI) >= PrefAlignment) &&
        (RHSV || getKnownAlignment(RHS, DL, CI) >= PrefAlignment)) {
      if (!LHSV) {
        Type *LHSPtrTy =
            IntType->getPointerTo(LHS->getType()->getPointerAddressSpace());
        LHSV = B.CreateLoad(IntType, B.CreateBitCast(LHS, LHSPtrTy), "lhsv");
      }
      if (!RHSV) {
        Type *RHSPtrTy =
            IntType->getPointerTo(RHS->getType()->getPointerAddressSpace());
        RHSV = B.CreateLoad(IntType, B.CreateBitCast(RHS, RHSPtrTy), "rhsv");
      }
      return B.CreateZExt(B.CreateICmpNE(LHSV, RHSV), CI->getType(), "memcmp");
    }
  }

  // Constant folding: memcmp(x, y, Len) -> constant (all arguments are const).
  // TODO: This is limited to i8 arrays.
  StringRef LHSStr, RHSStr;
  if (getConstantStringInfo(LHS, LHSStr) &&
      getConstantStringInfo(RHS, RHSStr)) {
    // Make sure we're not reading out-of-bounds memory.
    if (Len > LHSStr.size() || Len > RHSStr.size())
      return nullptr;
    // Fold the memcmp and normalize the result.  This way we get consistent
    // results across multiple platforms.
    uint64_t Ret = 0;
    int Cmp = memcmp(LHSStr.data(), RHSStr.data(), Len);
    if (Cmp < 0)
      Ret = -1;
    else if (Cmp > 0)
      Ret = 1;
    return ConstantInt::get(CI->getType(), Ret);
  }
  return nullptr;
}
  923.  
  924.  // Most simplifications for memcmp also apply to bcmp.
  925.  Value *LibCallSimplifier::optimizeMemCmpBCmpCommon(CallInst *CI,
  926.                                                     IRBuilder<> &B) {
  927.    Value *LHS = CI->getArgOperand(0), *RHS = CI->getArgOperand(1);
  928.    Value *Size = CI->getArgOperand(2);
  929.  
  930.    if (LHS == RHS) // memcmp(s,s,x) -> 0
  931.      return Constant::getNullValue(CI->getType());
  932.  
  933.    // Handle constant lengths.
  934.    if (ConstantInt *LenC = dyn_cast<ConstantInt>(Size))
  935.      if (Value *Res = optimizeMemCmpConstantSize(CI, LHS, RHS,
  936.                                                  LenC->getZExtValue(), B, DL))
  937.        return Res;
  938.  
  939.    return nullptr;
  940.  }
  941.  
  942.  Value *LibCallSimplifier::optimizeMemCmp(CallInst *CI, IRBuilder<> &B) {
  943.    if (Value *V = optimizeMemCmpBCmpCommon(CI, B))
  944.      return V;
  945.  
  946.    // memcmp(x, y, Len) == 0 -> bcmp(x, y, Len) == 0
  947.    // bcmp can be more efficient than memcmp because it only has to know that
  948.    // there is a difference, not how different one is to the other.
  949.    if (TLI->has(LibFunc_bcmp) && isOnlyUsedInZeroEqualityComparison(CI)) {
  950.      Value *LHS = CI->getArgOperand(0);
  951.      Value *RHS = CI->getArgOperand(1);
  952.      Value *Size = CI->getArgOperand(2);
  953.      return emitBCmp(LHS, RHS, Size, B, DL, TLI);
  954.    }
  955.  
  956.    return nullptr;
  957.  }
  958.  
// bcmp is memcmp restricted to an equality result, so only the folds shared
// with memcmp apply; no memcmp->bcmp rewrite is needed here.
Value *LibCallSimplifier::optimizeBCmp(CallInst *CI, IRBuilder<> &B) {
  return optimizeMemCmpBCmpCommon(CI, B);
}
  962.  
  963.  Value *LibCallSimplifier::optimizeMemCpy(CallInst *CI, IRBuilder<> &B) {
  964.    // memcpy(x, y, n) -> llvm.memcpy(align 1 x, align 1 y, n)
  965.    B.CreateMemCpy(CI->getArgOperand(0), 1, CI->getArgOperand(1), 1,
  966.                   CI->getArgOperand(2));
  967.    return CI->getArgOperand(0);
  968.  }
  969.  
  970.  Value *LibCallSimplifier::optimizeMemMove(CallInst *CI, IRBuilder<> &B) {
  971.    // memmove(x, y, n) -> llvm.memmove(align 1 x, align 1 y, n)
  972.    B.CreateMemMove(CI->getArgOperand(0), 1, CI->getArgOperand(1), 1,
  973.                    CI->getArgOperand(2));
  974.    return CI->getArgOperand(0);
  975.  }
  976.  
/// Fold memset[_chk](malloc(n), 0, n) --> calloc(1, n).
Value *LibCallSimplifier::foldMallocMemset(CallInst *Memset, IRBuilder<> &B) {
  // This has to be a memset of zeros (bzero).
  auto *FillValue = dyn_cast<ConstantInt>(Memset->getArgOperand(1));
  if (!FillValue || FillValue->getZExtValue() != 0)
    return nullptr;

  // TODO: We should handle the case where the malloc has more than one use.
  // This is necessary to optimize common patterns such as when the result of
  // the malloc is checked against null or when a memset intrinsic is used in
  // place of a memset library call.
  auto *Malloc = dyn_cast<CallInst>(Memset->getArgOperand(0));
  if (!Malloc || !Malloc->hasOneUse())
    return nullptr;

  // Is the inner call really malloc()?
  Function *InnerCallee = Malloc->getCalledFunction();
  if (!InnerCallee)
    return nullptr;

  LibFunc Func;
  if (!TLI->getLibFunc(*InnerCallee, Func) || !TLI->has(Func) ||
      Func != LibFunc_malloc)
    return nullptr;

  // The memset must cover the same number of bytes that are malloc'd.
  if (Memset->getArgOperand(2) != Malloc->getArgOperand(0))
    return nullptr;

  // Replace the malloc with a calloc. We need the data layout to know what the
  // actual size of a 'size_t' parameter is.
  // The calloc is inserted directly after the malloc so it dominates the
  // malloc's (single) use before the RAUW below.
  B.SetInsertPoint(Malloc->getParent(), ++Malloc->getIterator());
  const DataLayout &DL = Malloc->getModule()->getDataLayout();
  IntegerType *SizeType = DL.getIntPtrType(B.GetInsertBlock()->getContext());
  Value *Calloc = emitCalloc(ConstantInt::get(SizeType, 1),
                             Malloc->getArgOperand(0), Malloc->getAttributes(),
                             B, *TLI);
  if (!Calloc)
    return nullptr;

  // Reroute the malloc's user to the calloc and delete the now-dead malloc.
  Malloc->replaceAllUsesWith(Calloc);
  eraseFromParent(Malloc);

  return Calloc;
}
  1022.  
  1023.  Value *LibCallSimplifier::optimizeMemSet(CallInst *CI, IRBuilder<> &B) {
  1024.    if (auto *Calloc = foldMallocMemset(CI, B))
  1025.      return Calloc;
  1026.  
  1027.    // memset(p, v, n) -> llvm.memset(align 1 p, v, n)
  1028.    Value *Val = B.CreateIntCast(CI->getArgOperand(1), B.getInt8Ty(), false);
  1029.    B.CreateMemSet(CI->getArgOperand(0), Val, CI->getArgOperand(2), 1);
  1030.    return CI->getArgOperand(0);
  1031.  }
  1032.  
  1033.  Value *LibCallSimplifier::optimizeRealloc(CallInst *CI, IRBuilder<> &B) {
  1034.    if (isa<ConstantPointerNull>(CI->getArgOperand(0)))
  1035.      return emitMalloc(CI->getArgOperand(1), B, DL, TLI);
  1036.  
  1037.    return nullptr;
  1038.  }
  1039.  
  1040.  //===----------------------------------------------------------------------===//
  1041.  // Math Library Optimizations
  1042.  //===----------------------------------------------------------------------===//
  1043.  
  1044.  // Replace a libcall \p CI with a call to intrinsic \p IID
  1045.  static Value *replaceUnaryCall(CallInst *CI, IRBuilder<> &B, Intrinsic::ID IID) {
  1046.    // Propagate fast-math flags from the existing call to the new call.
  1047.    IRBuilder<>::FastMathFlagGuard Guard(B);
  1048.    B.setFastMathFlags(CI->getFastMathFlags());
  1049.  
  1050.    Module *M = CI->getModule();
  1051.    Value *V = CI->getArgOperand(0);
  1052.    Function *F = Intrinsic::getDeclaration(M, IID, CI->getType());
  1053.    CallInst *NewCall = B.CreateCall(F, V);
  1054.    NewCall->takeName(CI);
  1055.    return NewCall;
  1056.  }
  1057.  
  1058.  /// Return a variant of Val with float type.
  1059.  /// Currently this works in two cases: If Val is an FPExtension of a float
  1060.  /// value to something bigger, simply return the operand.
  1061.  /// If Val is a ConstantFP but can be converted to a float ConstantFP without
  1062.  /// loss of precision do so.
  1063.  static Value *valueHasFloatPrecision(Value *Val) {
  1064.    if (FPExtInst *Cast = dyn_cast<FPExtInst>(Val)) {
  1065.      Value *Op = Cast->getOperand(0);
  1066.      if (Op->getType()->isFloatTy())
  1067.        return Op;
  1068.    }
  1069.    if (ConstantFP *Const = dyn_cast<ConstantFP>(Val)) {
  1070.      APFloat F = Const->getValueAPF();
  1071.      bool losesInfo;
  1072.      (void)F.convert(APFloat::IEEEsingle(), APFloat::rmNearestTiesToEven,
  1073.                      &losesInfo);
  1074.      if (!losesInfo)
  1075.        return ConstantFP::get(Const->getContext(), F);
  1076.    }
  1077.    return nullptr;
  1078.  }
  1079.  
/// Shrink double -> float functions.
/// If the call's argument(s) originated as floats (either via fpext or as
/// constants exactly representable in single precision), replace
/// g((double)x) with (double)gf(x) — the float variant of the callee.
/// \p isPrecise additionally requires every user of the result to truncate
/// it back to float, so no double precision is actually observed.
static Value *optimizeDoubleFP(CallInst *CI, IRBuilder<> &B,
                               bool isBinary, bool isPrecise = false) {
  Function *CalleeFn = CI->getCalledFunction();
  if (!CI->getType()->isDoubleTy() || !CalleeFn)
    return nullptr;

  // If not all the uses of the function are converted to float, then bail out.
  // This matters if the precision of the result is more important than the
  // precision of the arguments.
  if (isPrecise)
    for (User *U : CI->users()) {
      FPTruncInst *Cast = dyn_cast<FPTruncInst>(U);
      if (!Cast || !Cast->getType()->isFloatTy())
        return nullptr;
    }

  // If this is something like 'g((double) float)', convert to 'gf(float)'.
  // V[1] stays null for unary callees.
  Value *V[2];
  V[0] = valueHasFloatPrecision(CI->getArgOperand(0));
  V[1] = isBinary ? valueHasFloatPrecision(CI->getArgOperand(1)) : nullptr;
  if (!V[0] || (isBinary && !V[1]))
    return nullptr;

  StringRef CalleeNm = CalleeFn->getName();
  AttributeList CalleeAt = CalleeFn->getAttributes();
  bool CalleeIn = CalleeFn->isIntrinsic();

  // If call isn't an intrinsic, check that it isn't within a function with the
  // same name as the float version of this call, otherwise the result is an
  // infinite loop.  For example, from MinGW-w64:
  //
  // float expf(float val) { return (float) exp((double) val); }
  if (!CalleeIn) {
    const Function *Fn = CI->getFunction();
    StringRef FnName = Fn->getName();
    if (FnName.back() == 'f' &&
        FnName.size() == (CalleeNm.size() + 1) &&
        FnName.startswith(CalleeNm))
      return nullptr;
  }

  // Propagate the math semantics from the current function to the new function.
  IRBuilder<>::FastMathFlagGuard Guard(B);
  B.setFastMathFlags(CI->getFastMathFlags());

  // g((double) float) -> (double) gf(float)
  // Intrinsics are simply re-declared at float type; libcalls go through the
  // float-named emitter helpers with the callee's attributes.
  Value *R;
  if (CalleeIn) {
    Module *M = CI->getModule();
    Intrinsic::ID IID = CalleeFn->getIntrinsicID();
    Function *Fn = Intrinsic::getDeclaration(M, IID, B.getFloatTy());
    R = isBinary ? B.CreateCall(Fn, V) : B.CreateCall(Fn, V[0]);
  }
  else
    R = isBinary ? emitBinaryFloatFnCall(V[0], V[1], CalleeNm, B, CalleeAt)
                 : emitUnaryFloatFnCall(V[0], CalleeNm, B, CalleeAt);

  return B.CreateFPExt(R, B.getDoubleTy());
}
  1140.  
  1141.  /// Shrink double -> float for unary functions.
  1142.  static Value *optimizeUnaryDoubleFP(CallInst *CI, IRBuilder<> &B,
  1143.                                      bool isPrecise = false) {
  1144.    return optimizeDoubleFP(CI, B, false, isPrecise);
  1145.  }
  1146.  
  1147.  /// Shrink double -> float for binary functions.
  1148.  static Value *optimizeBinaryDoubleFP(CallInst *CI, IRBuilder<> &B,
  1149.                                       bool isPrecise = false) {
  1150.    return optimizeDoubleFP(CI, B, true, isPrecise);
  1151.  }
  1152.  
  1153.  // cabs(z) -> sqrt((creal(z)*creal(z)) + (cimag(z)*cimag(z)))
  1154.  Value *LibCallSimplifier::optimizeCAbs(CallInst *CI, IRBuilder<> &B) {
  1155.    if (!CI->isFast())
  1156.      return nullptr;
  1157.  
  1158.    // Propagate fast-math flags from the existing call to new instructions.
  1159.    IRBuilder<>::FastMathFlagGuard Guard(B);
  1160.    B.setFastMathFlags(CI->getFastMathFlags());
  1161.  
  1162.    Value *Real, *Imag;
  1163.    if (CI->getNumArgOperands() == 1) {
  1164.      Value *Op = CI->getArgOperand(0);
  1165.      assert(Op->getType()->isArrayTy() && "Unexpected signature for cabs!");
  1166.      Real = B.CreateExtractValue(Op, 0, "real");
  1167.      Imag = B.CreateExtractValue(Op, 1, "imag");
  1168.    } else {
  1169.      assert(CI->getNumArgOperands() == 2 && "Unexpected signature for cabs!");
  1170.      Real = CI->getArgOperand(0);
  1171.      Imag = CI->getArgOperand(1);
  1172.    }
  1173.  
  1174.    Value *RealReal = B.CreateFMul(Real, Real);
  1175.    Value *ImagImag = B.CreateFMul(Imag, Imag);
  1176.  
  1177.    Function *FSqrt = Intrinsic::getDeclaration(CI->getModule(), Intrinsic::sqrt,
  1178.                                                CI->getType());
  1179.    return B.CreateCall(FSqrt, B.CreateFAdd(RealReal, ImagImag), "cabs");
  1180.  }
  1181.  
/// Exploit the reflection symmetries of sin/cos/tan to strip a negation of
/// the argument: sin/tan are odd functions, cos is even.
static Value *optimizeTrigReflections(CallInst *Call, LibFunc Func,
                                      IRBuilder<> &B) {
  // Calls that are not FPMathOperators cannot carry the fast-math flags this
  // transform relies on.
  if (!isa<FPMathOperator>(Call))
    return nullptr;

  // New instructions inherit the original call's fast-math flags.
  IRBuilder<>::FastMathFlagGuard Guard(B);
  B.setFastMathFlags(Call->getFastMathFlags());

  // TODO: Can this be shared to also handle LLVM intrinsics?
  Value *X;
  switch (Func) {
  case LibFunc_sin:
  case LibFunc_sinf:
  case LibFunc_sinl:
  case LibFunc_tan:
  case LibFunc_tanf:
  case LibFunc_tanl:
    // sin(-X) --> -sin(X)
    // tan(-X) --> -tan(X)
    // The one-use requirement ensures the fneg becomes dead afterwards, so
    // the rewrite does not increase the instruction count.
    if (match(Call->getArgOperand(0), m_OneUse(m_FNeg(m_Value(X)))))
      return B.CreateFNeg(B.CreateCall(Call->getCalledFunction(), X));
    break;
  case LibFunc_cos:
  case LibFunc_cosf:
  case LibFunc_cosl:
    // cos(-X) --> cos(X)
    if (match(Call->getArgOperand(0), m_FNeg(m_Value(X))))
      return B.CreateCall(Call->getCalledFunction(), X, "cos");
    break;
  default:
    break;
  }
  return nullptr;
}
  1216.  
/// Build Base**Exp as a chain of fmuls using precomputed addition chains,
/// memoizing each intermediate power in InnerChain (indexed by exponent,
/// 1..32). Per the table comments, entries 1 and 2 are expected to be
/// seeded by the caller before the first call.
static Value *getPow(Value *InnerChain[33], unsigned Exp, IRBuilder<> &B) {
  // Multiplications calculated using Addition Chains.
  // Refer: http://wwwhomes.uni-bielefeld.de/achim/addition_chain.html

  assert(Exp != 0 && "Incorrect exponent 0 not handled");

  // Already materialized this power? Reuse it.
  if (InnerChain[Exp])
    return InnerChain[Exp];

  // AddChain[i] = {a, b} with a + b == i: x**i = x**a * x**b.
  static const unsigned AddChain[33][2] = {
      {0, 0}, // Unused.
      {0, 0}, // Unused (base case = pow1).
      {1, 1}, // Unused (pre-computed).
      {1, 2},  {2, 2},   {2, 3},  {3, 3},   {2, 5},  {4, 4},
      {1, 8},  {5, 5},   {1, 10}, {6, 6},   {4, 9},  {7, 7},
      {3, 12}, {8, 8},   {8, 9},  {2, 16},  {1, 18}, {10, 10},
      {6, 15}, {11, 11}, {3, 20}, {12, 12}, {8, 17}, {13, 13},
      {3, 24}, {14, 14}, {4, 25}, {15, 15}, {3, 28}, {16, 16},
  };

  InnerChain[Exp] = B.CreateFMul(getPow(InnerChain, AddChain[Exp][0], B),
                                 getPow(InnerChain, AddChain[Exp][1], B));
  return InnerChain[Exp];
}
  1241.  
  1242.  /// Use exp{,2}(x * y) for pow(exp{,2}(x), y);
  1243.  /// exp2(n * x) for pow(2.0 ** n, x); exp10(x) for pow(10.0, x);
  1244.  /// exp2(log2(n) * x) for pow(n, x).
  1245.  Value *LibCallSimplifier::replacePowWithExp(CallInst *Pow, IRBuilder<> &B) {
  1246.    Value *Base = Pow->getArgOperand(0), *Expo = Pow->getArgOperand(1);
  1247.    AttributeList Attrs = Pow->getCalledFunction()->getAttributes();
  1248.    Module *Mod = Pow->getModule();
  1249.    Type *Ty = Pow->getType();
  1250.    bool Ignored;
  1251.  
  1252.    // Evaluate special cases related to a nested function as the base.
  1253.  
  1254.    // pow(exp(x), y) -> exp(x * y)
  1255.    // pow(exp2(x), y) -> exp2(x * y)
  1256.    // If exp{,2}() is used only once, it is better to fold two transcendental
  1257.    // math functions into one.  If used again, exp{,2}() would still have to be
  1258.    // called with the original argument, then keep both original transcendental
  1259.    // functions.  However, this transformation is only safe with fully relaxed
  1260.    // math semantics, since, besides rounding differences, it changes overflow
  1261.    // and underflow behavior quite dramatically.  For example:
  1262.    //   pow(exp(1000), 0.001) = pow(inf, 0.001) = inf
  1263.    // Whereas:
  1264.    //   exp(1000 * 0.001) = exp(1)
  1265.    // TODO: Loosen the requirement for fully relaxed math semantics.
  1266.    // TODO: Handle exp10() when more targets have it available.
  1267.    CallInst *BaseFn = dyn_cast<CallInst>(Base);
  1268.    if (BaseFn && BaseFn->hasOneUse() && BaseFn->isFast() && Pow->isFast()) {
  1269.      LibFunc LibFn;
  1270.  
  1271.      Function *CalleeFn = BaseFn->getCalledFunction();
  1272.      if (CalleeFn &&
  1273.          TLI->getLibFunc(CalleeFn->getName(), LibFn) && TLI->has(LibFn)) {
  1274.        StringRef ExpName;
  1275.        Intrinsic::ID ID;
  1276.        Value *ExpFn;
  1277.        LibFunc LibFnFloat;
  1278.        LibFunc LibFnDouble;
  1279.        LibFunc LibFnLongDouble;
  1280.  
  1281.        switch (LibFn) {
  1282.        default:
  1283.          return nullptr;
  1284.        case LibFunc_expf:  case LibFunc_exp:  case LibFunc_expl:
  1285.          ExpName = TLI->getName(LibFunc_exp);
  1286.          ID = Intrinsic::exp;
  1287.          LibFnFloat = LibFunc_expf;
  1288.          LibFnDouble = LibFunc_exp;
  1289.          LibFnLongDouble = LibFunc_expl;
  1290.          break;
  1291.        case LibFunc_exp2f: case LibFunc_exp2: case LibFunc_exp2l:
  1292.          ExpName = TLI->getName(LibFunc_exp2);
  1293.          ID = Intrinsic::exp2;
  1294.          LibFnFloat = LibFunc_exp2f;
  1295.          LibFnDouble = LibFunc_exp2;
  1296.          LibFnLongDouble = LibFunc_exp2l;
  1297.          break;
  1298.        }
  1299.  
  1300.        // Create new exp{,2}() with the product as its argument.
  1301.        Value *FMul = B.CreateFMul(BaseFn->getArgOperand(0), Expo, "mul");
  1302.        ExpFn = BaseFn->doesNotAccessMemory()
  1303.                ? B.CreateCall(Intrinsic::getDeclaration(Mod, ID, Ty),
  1304.                               FMul, ExpName)
  1305.                : emitUnaryFloatFnCall(FMul, TLI, LibFnDouble, LibFnFloat,
  1306.                                       LibFnLongDouble, B,
  1307.                                       BaseFn->getAttributes());
  1308.  
  1309.        // Since the new exp{,2}() is different from the original one, dead code
  1310.        // elimination cannot be trusted to remove it, since it may have side
  1311.        // effects (e.g., errno).  When the only consumer for the original
  1312.        // exp{,2}() is pow(), then it has to be explicitly erased.
  1313.        BaseFn->replaceAllUsesWith(ExpFn);
  1314.        eraseFromParent(BaseFn);
  1315.  
  1316.        return ExpFn;
  1317.      }
  1318.    }
  1319.  
  1320.    // Evaluate special cases related to a constant base.
  1321.  
  1322.    const APFloat *BaseF;
  1323.    if (!match(Pow->getArgOperand(0), m_APFloat(BaseF)))
  1324.      return nullptr;
  1325.  
  1326.    // pow(2.0 ** n, x) -> exp2(n * x)
  1327. -  if (hasUnaryFloatFn(TLI, Ty, LibFunc_exp2, LibFunc_exp2f, LibFunc_exp2l)) {
  1328. -    APFloat BaseR = APFloat(1.0);
  1329. -    BaseR.convert(BaseF->getSemantics(), APFloat::rmTowardZero, &Ignored);
  1330. -    BaseR = BaseR / *BaseF;
  1331. -    bool IsInteger = BaseF->isInteger(), IsReciprocal = BaseR.isInteger();
  1332. -    const APFloat *NF = IsReciprocal ? &BaseR : BaseF;
  1333. -    APSInt NI(64, false);
  1334. -    if ((IsInteger || IsReciprocal) &&
  1335. -        NF->convertToInteger(NI, APFloat::rmTowardZero, &Ignored) ==
  1336. -            APFloat::opOK &&
  1337. -        NI > 1 && NI.isPowerOf2()) {
  1338. -      double N = NI.logBase2() * (IsReciprocal ? -1.0 : 1.0);
  1339. -      Value *FMul = B.CreateFMul(Expo, ConstantFP::get(Ty, N), "mul");
  1340. +  APFloat BaseR = APFloat(1.0);
  1341. +  BaseR.convert(BaseF->getSemantics(), APFloat::rmTowardZero, &Ignored);
  1342. +  BaseR = BaseR / *BaseF;
  1343. +  bool IsInteger = BaseF->isInteger(), IsReciprocal = BaseR.isInteger();
  1344. +  const APFloat *NF = IsReciprocal ? &BaseR : BaseF;
  1345. +  APSInt NI(64, false);
  1346. +  if ((IsInteger || IsReciprocal) &&
  1347. +      NF->convertToInteger(NI, APFloat::rmTowardZero, &Ignored) ==
  1348. +          APFloat::opOK &&
  1349. +      NI > 1 && NI.isPowerOf2()) {
  1350. +    int N = NI.logBase2() * (IsReciprocal ? -1 : 1);
  1351. +    if ((isa<SIToFPInst>(Expo) || isa<UIToFPInst>(Expo)) &&
  1352. +        hasUnaryFloatFn(TLI, Ty, LibFunc_ldexp, LibFunc_ldexpf,
  1353. +                        LibFunc_ldexpl)) {
  1354. +      Value *LdExpArg = nullptr;
  1355. +      if (SIToFPInst *ExpoC = dyn_cast<SIToFPInst>(Expo)) {
  1356. +        if (ExpoC->getOperand(0)->getType()->getPrimitiveSizeInBits() <= 32)
  1357. +          LdExpArg = B.CreateSExt(ExpoC->getOperand(0), B.getInt32Ty());
  1358. +      } else if (UIToFPInst *ExpoC = dyn_cast<UIToFPInst>(Expo)) {
  1359. +        if (ExpoC->getOperand(0)->getType()->getPrimitiveSizeInBits() < 32)
  1360. +          LdExpArg = B.CreateZExt(ExpoC->getOperand(0), B.getInt32Ty());
  1361. +      }
  1362. +
  1363. +      if (LdExpArg) {
  1364. +        Value *X = ConstantFP::get(Pow->getType(), 1.0);
  1365. +        Value *Mul = B.CreateMul(LdExpArg, ConstantInt::get(B.getInt32Ty(), N));
  1366. +        return emitBinaryFloatFnCall(X, Mul, "ldexp", B, {});
  1367. +      }
  1368. +    } else if (hasUnaryFloatFn(TLI, Ty, LibFunc_exp2, LibFunc_exp2f,
  1369. +                               LibFunc_exp2l)) {
  1370. +      // pow(2.0 ** n, x) -> exp2(n * x)
  1371. +      Value *FMul = B.CreateFMul(Expo, ConstantFP::get(Ty, (double)N), "mul");
  1372.        if (Pow->doesNotAccessMemory())
  1373.          return B.CreateCall(Intrinsic::getDeclaration(Mod, Intrinsic::exp2, Ty),
  1374.                              FMul, "exp2");
  1375.        else
  1376.          return emitUnaryFloatFnCall(FMul, TLI, LibFunc_exp2, LibFunc_exp2f,
  1377.                                      LibFunc_exp2l, B, Attrs);
  1378.      }
  1379.    }
  1380.  
  1381.    // pow(10.0, x) -> exp10(x)
  1382.    // TODO: There is no exp10() intrinsic yet, but some day there shall be one.
  1383.    if (match(Base, m_SpecificFP(10.0)) &&
  1384.        hasUnaryFloatFn(TLI, Ty, LibFunc_exp10, LibFunc_exp10f, LibFunc_exp10l))
  1385.      return emitUnaryFloatFnCall(Expo, TLI, LibFunc_exp10, LibFunc_exp10f,
  1386.                                  LibFunc_exp10l, B, Attrs);
  1387.  
  1388.    // pow(n, x) -> exp2(log2(n) * x)
  1389.    if (Pow->hasOneUse() && Pow->hasApproxFunc() && Pow->hasNoNaNs() &&
  1390.        Pow->hasNoInfs() && BaseF->isNormal() && !BaseF->isNegative()) {
  1391.      Value *Log = nullptr;
  1392.      if (Ty->isFloatTy())
  1393.        Log = ConstantFP::get(Ty, std::log2(BaseF->convertToFloat()));
  1394.      else if (Ty->isDoubleTy())
  1395.        Log = ConstantFP::get(Ty, std::log2(BaseF->convertToDouble()));
  1396.  
  1397.      if (Log) {
  1398.        Value *FMul = B.CreateFMul(Log, Expo, "mul");
  1399.        if (Pow->doesNotAccessMemory()) {
  1400.          return B.CreateCall(Intrinsic::getDeclaration(Mod, Intrinsic::exp2, Ty),
  1401.                              FMul, "exp2");
  1402.        } else {
  1403.          if (hasUnaryFloatFn(TLI, Ty, LibFunc_exp2, LibFunc_exp2f,
  1404.                              LibFunc_exp2l))
  1405.            return emitUnaryFloatFnCall(FMul, TLI, LibFunc_exp2, LibFunc_exp2f,
  1406.                                        LibFunc_exp2l, B, Attrs);
  1407.        }
  1408.      }
  1409.    }
  1410.    return nullptr;
  1411.  }
  1412.  
  1413.  static Value *getSqrtCall(Value *V, AttributeList Attrs, bool NoErrno,
  1414.                            Module *M, IRBuilder<> &B,
  1415.                            const TargetLibraryInfo *TLI) {
  1416.    // If errno is never set, then use the intrinsic for sqrt().
  1417.    if (NoErrno) {
  1418.      Function *SqrtFn =
  1419.          Intrinsic::getDeclaration(M, Intrinsic::sqrt, V->getType());
  1420.      return B.CreateCall(SqrtFn, V, "sqrt");
  1421.    }
  1422.  
  1423.    // Otherwise, use the libcall for sqrt().
  1424.    if (hasUnaryFloatFn(TLI, V->getType(), LibFunc_sqrt, LibFunc_sqrtf,
  1425.                        LibFunc_sqrtl))
  1426.      // TODO: We also should check that the target can in fact lower the sqrt()
  1427.      // libcall. We currently have no way to ask this question, so we ask if
  1428.      // the target has a sqrt() libcall, which is not exactly the same.
  1429.      return emitUnaryFloatFnCall(V, TLI, LibFunc_sqrt, LibFunc_sqrtf,
  1430.                                  LibFunc_sqrtl, B, Attrs);
  1431.  
  1432.    return nullptr;
  1433.  }
  1434.  
/// Use square root in place of pow(x, +/-0.5).
///
/// Returns the replacement value, or null when the exponent is not exactly
/// +/-0.5 or no sqrt form (intrinsic or libcall) is available. The expansion
/// adds fabs/select fix-ups so that signed-zero and infinity semantics of
/// pow() are preserved unless the call's fast-math flags waive them.
Value *LibCallSimplifier::replacePowWithSqrt(CallInst *Pow, IRBuilder<> &B) {
  Value *Sqrt, *Base = Pow->getArgOperand(0), *Expo = Pow->getArgOperand(1);
  AttributeList Attrs = Pow->getCalledFunction()->getAttributes();
  Module *Mod = Pow->getModule();
  Type *Ty = Pow->getType();

  // Only a constant exponent of exactly +0.5 or -0.5 qualifies.
  const APFloat *ExpoF;
  if (!match(Expo, m_APFloat(ExpoF)) ||
      (!ExpoF->isExactlyValue(0.5) && !ExpoF->isExactlyValue(-0.5)))
    return nullptr;

  // Emit sqrt as an intrinsic when the call cannot touch errno, otherwise as
  // a libcall; bail out if neither form is available.
  Sqrt = getSqrtCall(Base, Attrs, Pow->doesNotAccessMemory(), Mod, B, TLI);
  if (!Sqrt)
    return nullptr;

  // Handle signed zero base by expanding to fabs(sqrt(x)).
  if (!Pow->hasNoSignedZeros()) {
    Function *FAbsFn = Intrinsic::getDeclaration(Mod, Intrinsic::fabs, Ty);
    Sqrt = B.CreateCall(FAbsFn, Sqrt, "abs");
  }

  // Handle non finite base by expanding to
  // (x == -infinity ? +infinity : sqrt(x)).
  if (!Pow->hasNoInfs()) {
    Value *PosInf = ConstantFP::getInfinity(Ty),
          *NegInf = ConstantFP::getInfinity(Ty, true);
    Value *FCmp = B.CreateFCmpOEQ(Base, NegInf, "isinf");
    Sqrt = B.CreateSelect(FCmp, PosInf, Sqrt);
  }

  // If the exponent is negative, then get the reciprocal.
  if (ExpoF->isNegative())
    Sqrt = B.CreateFDiv(ConstantFP::get(Ty, 1.0), Sqrt, "reciprocal");

  return Sqrt;
}
  1472.  
  1473.  static Value *createPowWithIntegerExponent(Value *Base, Value *Expo, Module *M,
  1474.                                             IRBuilder<> &B) {
  1475.    Value *Args[] = {Base, Expo};
  1476.    Function *F = Intrinsic::getDeclaration(M, Intrinsic::powi, Base->getType());
  1477.    return B.CreateCall(F, Args);
  1478.  }
  1479.  
/// Simplify a call to pow()/powf()/powl().
///
/// Tries, in order: shrinking pow->powf, base special cases (1.0, exp/exp2
/// rewrites via replacePowWithExp), exponent special cases (-1, 0, 1, 2,
/// +/-0.5 via replacePowWithSqrt), expansion of small constant exponents into
/// multiplication chains, and conversion to the llvm.powi intrinsic. Returns
/// the replacement value or null (or the shrunk call) when nothing applies.
Value *LibCallSimplifier::optimizePow(CallInst *Pow, IRBuilder<> &B) {
  Value *Base = Pow->getArgOperand(0);
  Value *Expo = Pow->getArgOperand(1);
  Function *Callee = Pow->getCalledFunction();
  StringRef Name = Callee->getName();
  Type *Ty = Pow->getType();
  Module *M = Pow->getModule();
  Value *Shrunk = nullptr;
  bool AllowApprox = Pow->hasApproxFunc();
  bool Ignored;

  // Bail out if simplifying libcalls to pow() is disabled.
  if (!hasUnaryFloatFn(TLI, Ty, LibFunc_pow, LibFunc_powf, LibFunc_powl))
    return nullptr;

  // Propagate the math semantics from the call to any created instructions.
  IRBuilder<>::FastMathFlagGuard Guard(B);
  B.setFastMathFlags(Pow->getFastMathFlags());

  // Shrink pow() to powf() if the arguments are single precision,
  // unless the result is expected to be double precision.
  if (UnsafeFPShrink && Name == TLI->getName(LibFunc_pow) &&
      hasFloatVersion(Name))
    Shrunk = optimizeBinaryDoubleFP(Pow, B, true);

  // Evaluate special cases related to the base.

  // pow(1.0, x) -> 1.0
  if (match(Base, m_FPOne()))
    return Base;

  if (Value *Exp = replacePowWithExp(Pow, B))
    return Exp;

  // Evaluate special cases related to the exponent.

  // pow(x, -1.0) -> 1.0 / x
  if (match(Expo, m_SpecificFP(-1.0)))
    return B.CreateFDiv(ConstantFP::get(Ty, 1.0), Base, "reciprocal");

  // pow(x, 0.0) -> 1.0
  if (match(Expo, m_SpecificFP(0.0)))
    return ConstantFP::get(Ty, 1.0);

  // pow(x, 1.0) -> x
  if (match(Expo, m_FPOne()))
    return Base;

  // pow(x, 2.0) -> x * x
  if (match(Expo, m_SpecificFP(2.0)))
    return B.CreateFMul(Base, Base, "square");

  if (Value *Sqrt = replacePowWithSqrt(Pow, B))
    return Sqrt;

  // pow(x, n) -> x * x * x * ...
  const APFloat *ExpoF;
  if (AllowApprox && match(Expo, m_APFloat(ExpoF))) {
    // We limit to a max of 7 multiplications, thus the maximum exponent is 32.
    // If the exponent is an integer+0.5 we generate a call to sqrt and an
    // additional fmul.
    // TODO: This whole transformation should be backend specific (e.g. some
    //       backends might prefer libcalls or the limit for the exponent might
    //       be different) and it should also consider optimizing for size.
    APFloat LimF(ExpoF->getSemantics(), 33.0),
            ExpoA(abs(*ExpoF));
    if (ExpoA.compare(LimF) == APFloat::cmpLessThan) {
      // This transformation applies to integer or integer+0.5 exponents only.
      // For integer+0.5, we create a sqrt(Base) call.
      Value *Sqrt = nullptr;
      if (!ExpoA.isInteger()) {
        APFloat Expo2 = ExpoA;
        // To check if ExpoA is an integer + 0.5, we add it to itself. If there
        // is no floating point exception and the result is an integer, then
        // ExpoA == integer + 0.5
        if (Expo2.add(ExpoA, APFloat::rmNearestTiesToEven) != APFloat::opOK)
          return nullptr;

        if (!Expo2.isInteger())
          return nullptr;

        Sqrt = getSqrtCall(Base, Pow->getCalledFunction()->getAttributes(),
                           Pow->doesNotAccessMemory(), M, B, TLI);
      }

      // We will memoize intermediate products of the Addition Chain.
      Value *InnerChain[33] = {nullptr};
      InnerChain[1] = Base;
      InnerChain[2] = B.CreateFMul(Base, Base, "square");

      // We cannot readily convert a non-double type (like float) to a double.
      // So we first convert it to something which could be converted to double.
      ExpoA.convert(APFloat::IEEEdouble(), APFloat::rmTowardZero, &Ignored);
      Value *FMul = getPow(InnerChain, ExpoA.convertToDouble(), B);

      // Expand pow(x, y+0.5) to pow(x, y) * sqrt(x).
      if (Sqrt)
        FMul = B.CreateFMul(FMul, Sqrt);

      // If the exponent is negative, then get the reciprocal.
      if (ExpoF->isNegative())
        FMul = B.CreateFDiv(ConstantFP::get(Ty, 1.0), FMul, "reciprocal");

      return FMul;
    }

    APSInt IntExpo(32, /*isUnsigned=*/false);
    // powf(x, n) -> powi(x, n) if n is a constant signed integer value
    if (ExpoF->isInteger() &&
        ExpoF->convertToInteger(IntExpo, APFloat::rmTowardZero, &Ignored) ==
            APFloat::opOK) {
      return createPowWithIntegerExponent(
          Base, ConstantInt::get(B.getInt32Ty(), IntExpo), M, B);
    }
  }

  // powf(x, itofp(y)) -> powi(x, y)
  // Only a 32-bit signed source can be used directly; narrower sources are
  // sign- or zero-extended to i32 to match powi's exponent type.
  if (AllowApprox && (isa<SIToFPInst>(Expo) || isa<UIToFPInst>(Expo))) {
    Value *IntExpo = cast<Instruction>(Expo)->getOperand(0);
    Value *NewExpo = nullptr;
    unsigned BitWidth = IntExpo->getType()->getPrimitiveSizeInBits();
    if (isa<SIToFPInst>(Expo) && BitWidth == 32)
      NewExpo = IntExpo;
    else if (BitWidth < 32)
      NewExpo = isa<SIToFPInst>(Expo) ? B.CreateSExt(IntExpo, B.getInt32Ty())
                                      : B.CreateZExt(IntExpo, B.getInt32Ty());
    if (NewExpo)
      return createPowWithIntegerExponent(Base, NewExpo, M, B);
  }

  return Shrunk;
}
  1612.  
  1613.  Value *LibCallSimplifier::optimizeExp2(CallInst *CI, IRBuilder<> &B) {
  1614.    Function *Callee = CI->getCalledFunction();
  1615.    Value *Ret = nullptr;
  1616.    StringRef Name = Callee->getName();
  1617.    if (UnsafeFPShrink && Name == "exp2" && hasFloatVersion(Name))
  1618.      Ret = optimizeUnaryDoubleFP(CI, B, true);
  1619.  
  1620.    Value *Op = CI->getArgOperand(0);
  1621.    // Turn exp2(sitofp(x)) -> ldexp(1.0, sext(x))  if sizeof(x) <= 32
  1622.    // Turn exp2(uitofp(x)) -> ldexp(1.0, zext(x))  if sizeof(x) < 32
  1623.    LibFunc LdExp = LibFunc_ldexpl;
  1624.    if (Op->getType()->isFloatTy())
  1625.      LdExp = LibFunc_ldexpf;
  1626.    else if (Op->getType()->isDoubleTy())
  1627.      LdExp = LibFunc_ldexp;
  1628.  
  1629.    if (TLI->has(LdExp)) {
  1630.      Value *LdExpArg = nullptr;
  1631.      if (SIToFPInst *OpC = dyn_cast<SIToFPInst>(Op)) {
  1632.        if (OpC->getOperand(0)->getType()->getPrimitiveSizeInBits() <= 32)
  1633.          LdExpArg = B.CreateSExt(OpC->getOperand(0), B.getInt32Ty());
  1634.      } else if (UIToFPInst *OpC = dyn_cast<UIToFPInst>(Op)) {
  1635.        if (OpC->getOperand(0)->getType()->getPrimitiveSizeInBits() < 32)
  1636.          LdExpArg = B.CreateZExt(OpC->getOperand(0), B.getInt32Ty());
  1637.      }
  1638.  
  1639.      if (LdExpArg) {
  1640.        Constant *One = ConstantFP::get(CI->getContext(), APFloat(1.0f));
  1641.        if (!Op->getType()->isFloatTy())
  1642.          One = ConstantExpr::getFPExtend(One, Op->getType());
  1643.  
  1644.        Module *M = CI->getModule();
  1645.        FunctionCallee NewCallee = M->getOrInsertFunction(
  1646.            TLI->getName(LdExp), Op->getType(), Op->getType(), B.getInt32Ty());
  1647.        CallInst *CI = B.CreateCall(NewCallee, {One, LdExpArg});
  1648.        if (const Function *F = dyn_cast<Function>(Callee->stripPointerCasts()))
  1649.          CI->setCallingConv(F->getCallingConv());
  1650.  
  1651.        return CI;
  1652.      }
  1653.    }
  1654.    return Ret;
  1655.  }
  1656.  
  1657.  Value *LibCallSimplifier::optimizeFMinFMax(CallInst *CI, IRBuilder<> &B) {
  1658.    // If we can shrink the call to a float function rather than a double
  1659.    // function, do that first.
  1660.    Function *Callee = CI->getCalledFunction();
  1661.    StringRef Name = Callee->getName();
  1662.    if ((Name == "fmin" || Name == "fmax") && hasFloatVersion(Name))
  1663.      if (Value *Ret = optimizeBinaryDoubleFP(CI, B))
  1664.        return Ret;
  1665.  
  1666.    // The LLVM intrinsics minnum/maxnum correspond to fmin/fmax. Canonicalize to
  1667.    // the intrinsics for improved optimization (for example, vectorization).
  1668.    // No-signed-zeros is implied by the definitions of fmax/fmin themselves.
  1669.    // From the C standard draft WG14/N1256:
  1670.    // "Ideally, fmax would be sensitive to the sign of zero, for example
  1671.   // fmax(-0.0, +0.0) would return +0; however, implementation in software
  1672.   // might be impractical."
  1673.    IRBuilder<>::FastMathFlagGuard Guard(B);
  1674.    FastMathFlags FMF = CI->getFastMathFlags();
  1675.    FMF.setNoSignedZeros();
  1676.    B.setFastMathFlags(FMF);
  1677.  
  1678.    Intrinsic::ID IID = Callee->getName().startswith("fmin") ? Intrinsic::minnum
  1679.                                                             : Intrinsic::maxnum;
  1680.    Function *F = Intrinsic::getDeclaration(CI->getModule(), IID, CI->getType());
  1681.    return B.CreateCall(F, { CI->getArgOperand(0), CI->getArgOperand(1) });
  1682.  }
  1683.  
/// Simplify a call to log()/log2()/log10().
///
/// After an optional double->float shrink, and only when both this call and
/// the operand call carry the 'fast' flag, folds:
///   log(pow(x, y))  -> y * log(x)   (for log, log2, log10)
///   log(exp2(y))    -> y * log(2)   (for log only)
/// Returns the replacement value, the shrunk call, or null.
Value *LibCallSimplifier::optimizeLog(CallInst *CI, IRBuilder<> &B) {
  Function *Callee = CI->getCalledFunction();
  Value *Ret = nullptr;
  StringRef Name = Callee->getName();
  if (UnsafeFPShrink && hasFloatVersion(Name))
    Ret = optimizeUnaryDoubleFP(CI, B, true);

  // These rewrites reassociate FP math, so they require 'fast'.
  if (!CI->isFast())
    return Ret;
  Value *Op1 = CI->getArgOperand(0);
  auto *OpC = dyn_cast<CallInst>(Op1);

  // The earlier call must also be 'fast' in order to do these transforms.
  if (!OpC || !OpC->isFast())
    return Ret;

  // log(pow(x,y)) -> y*log(x)
  // This is only applicable to log, log2, log10.
  if (Name != "log" && Name != "log2" && Name != "log10")
    return Ret;

  // Any instructions created below get full fast-math flags.
  IRBuilder<>::FastMathFlagGuard Guard(B);
  FastMathFlags FMF;
  FMF.setFast();
  B.setFastMathFlags(FMF);

  LibFunc Func;
  Function *F = OpC->getCalledFunction();
  // Matches both the pow libcall and the llvm.pow intrinsic.
  if (F && ((TLI->getLibFunc(F->getName(), Func) && TLI->has(Func) &&
      Func == LibFunc_pow) || F->getIntrinsicID() == Intrinsic::pow))
    return B.CreateFMul(OpC->getArgOperand(1),
      emitUnaryFloatFnCall(OpC->getOperand(0), Callee->getName(), B,
                           Callee->getAttributes()), "mul");

  // log(exp2(y)) -> y*log(2)
  if (F && Name == "log" && TLI->getLibFunc(F->getName(), Func) &&
      TLI->has(Func) && Func == LibFunc_exp2)
    return B.CreateFMul(
        OpC->getArgOperand(0),
        emitUnaryFloatFnCall(ConstantFP::get(CI->getType(), 2.0),
                             Callee->getName(), B, Callee->getAttributes()),
        "logmul");
  return Ret;
}
  1728.  
/// Simplify a call to sqrt() (libcall or intrinsic).
///
/// After an optional double->float shrink, and only under 'fast', hoists a
/// repeated multiplicand out of the radicand:
///   sqrt(x * x)       -> fabs(x)
///   sqrt((x * x) * y) -> fabs(x) * sqrt(y)
/// Returns the replacement value, the shrunk call, or null.
Value *LibCallSimplifier::optimizeSqrt(CallInst *CI, IRBuilder<> &B) {
  Function *Callee = CI->getCalledFunction();
  Value *Ret = nullptr;
  // TODO: Once we have a way (other than checking for the existince of the
  // libcall) to tell whether our target can lower @llvm.sqrt, relax the
  // condition below.
  if (TLI->has(LibFunc_sqrtf) && (Callee->getName() == "sqrt" ||
                                  Callee->getIntrinsicID() == Intrinsic::sqrt))
    Ret = optimizeUnaryDoubleFP(CI, B, true);

  if (!CI->isFast())
    return Ret;

  // The argument must itself be a 'fast' multiply.
  Instruction *I = dyn_cast<Instruction>(CI->getArgOperand(0));
  if (!I || I->getOpcode() != Instruction::FMul || !I->isFast())
    return Ret;

  // We're looking for a repeated factor in a multiplication tree,
  // so we can do this fold: sqrt(x * x) -> fabs(x);
  // or this fold: sqrt((x * x) * y) -> fabs(x) * sqrt(y).
  Value *Op0 = I->getOperand(0);
  Value *Op1 = I->getOperand(1);
  Value *RepeatOp = nullptr;
  Value *OtherOp = nullptr;
  if (Op0 == Op1) {
    // Simple match: the operands of the multiply are identical.
    RepeatOp = Op0;
  } else {
    // Look for a more complicated pattern: one of the operands is itself
    // a multiply, so search for a common factor in that multiply.
    // Note: We don't bother looking any deeper than this first level or for
    // variations of this pattern because instcombine's visitFMUL and/or the
    // reassociation pass should give us this form.
    Value *OtherMul0, *OtherMul1;
    if (match(Op0, m_FMul(m_Value(OtherMul0), m_Value(OtherMul1)))) {
      // Pattern: sqrt((x * y) * z)
      if (OtherMul0 == OtherMul1 && cast<Instruction>(Op0)->isFast()) {
        // Matched: sqrt((x * x) * z)
        RepeatOp = OtherMul0;
        OtherOp = Op1;
      }
    }
  }
  if (!RepeatOp)
    return Ret;

  // Fast math flags for any created instructions should match the sqrt
  // and multiply.
  IRBuilder<>::FastMathFlagGuard Guard(B);
  B.setFastMathFlags(I->getFastMathFlags());

  // If we found a repeated factor, hoist it out of the square root and
  // replace it with the fabs of that factor.
  Module *M = Callee->getParent();
  Type *ArgType = I->getType();
  Function *Fabs = Intrinsic::getDeclaration(M, Intrinsic::fabs, ArgType);
  Value *FabsCall = B.CreateCall(Fabs, RepeatOp, "fabs");
  if (OtherOp) {
    // If we found a non-repeated factor, we still need to get its square
    // root. We then multiply that by the value that was simplified out
    // of the square root calculation.
    Function *Sqrt = Intrinsic::getDeclaration(M, Intrinsic::sqrt, ArgType);
    Value *SqrtCall = B.CreateCall(Sqrt, OtherOp, "sqrt");
    return B.CreateFMul(FabsCall, SqrtCall);
  }
  return FabsCall;
}
  1796.  
  1797.  // TODO: Generalize to handle any trig function and its inverse.
  1798.  Value *LibCallSimplifier::optimizeTan(CallInst *CI, IRBuilder<> &B) {
  1799.    Function *Callee = CI->getCalledFunction();
  1800.    Value *Ret = nullptr;
  1801.    StringRef Name = Callee->getName();
  1802.    if (UnsafeFPShrink && Name == "tan" && hasFloatVersion(Name))
  1803.      Ret = optimizeUnaryDoubleFP(CI, B, true);
  1804.  
  1805.    Value *Op1 = CI->getArgOperand(0);
  1806.    auto *OpC = dyn_cast<CallInst>(Op1);
  1807.    if (!OpC)
  1808.      return Ret;
  1809.  
  1810.    // Both calls must be 'fast' in order to remove them.
  1811.    if (!CI->isFast() || !OpC->isFast())
  1812.      return Ret;
  1813.  
  1814.    // tan(atan(x)) -> x
  1815.    // tanf(atanf(x)) -> x
  1816.    // tanl(atanl(x)) -> x
  1817.    LibFunc Func;
  1818.    Function *F = OpC->getCalledFunction();
  1819.    if (F && TLI->getLibFunc(F->getName(), Func) && TLI->has(Func) &&
  1820.        ((Func == LibFunc_atan && Callee->getName() == "tan") ||
  1821.         (Func == LibFunc_atanf && Callee->getName() == "tanf") ||
  1822.         (Func == LibFunc_atanl && Callee->getName() == "tanl")))
  1823.      Ret = OpC->getArgOperand(0);
  1824.    return Ret;
  1825.  }
  1826.  
  1827.  static bool isTrigLibCall(CallInst *CI) {
  1828.    // We can only hope to do anything useful if we can ignore things like errno
  1829.    // and floating-point exceptions.
  1830.    // We already checked the prototype.
  1831.    return CI->hasFnAttr(Attribute::NoUnwind) &&
  1832.           CI->hasFnAttr(Attribute::ReadNone);
  1833.  }
  1834.  
/// Insert a combined __sincospi[f]_stret call computing both sin(pi*Arg) and
/// cos(pi*Arg), and extract the two results into \p Sin and \p Cos (the raw
/// aggregate is returned in \p SinCos).
///
/// The call is placed immediately after \p Arg when it is an instruction, or
/// at the start of the entry block otherwise, so it dominates all uses being
/// replaced.
static void insertSinCosCall(IRBuilder<> &B, Function *OrigCallee, Value *Arg,
                             bool UseFloat, Value *&Sin, Value *&Cos,
                             Value *&SinCos) {
  Type *ArgTy = Arg->getType();
  Type *ResTy;
  StringRef Name;

  Triple T(OrigCallee->getParent()->getTargetTriple());
  if (UseFloat) {
    Name = "__sincospif_stret";

    assert(T.getArch() != Triple::x86 && "x86 messy and unsupported for now");
    // x86_64 can't use {float, float} since that would be returned in both
    // xmm0 and xmm1, which isn't what a real struct would do.
    ResTy = T.getArch() == Triple::x86_64
                ? static_cast<Type *>(VectorType::get(ArgTy, 2))
                : static_cast<Type *>(StructType::get(ArgTy, ArgTy));
  } else {
    Name = "__sincospi_stret";
    ResTy = StructType::get(ArgTy, ArgTy);
  }

  Module *M = OrigCallee->getParent();
  // Reuse the original callee's attributes on the combined function.
  FunctionCallee Callee =
      M->getOrInsertFunction(Name, OrigCallee->getAttributes(), ResTy, ArgTy);

  if (Instruction *ArgInst = dyn_cast<Instruction>(Arg)) {
    // If the argument is an instruction, it must dominate all uses so put our
    // sincos call there.
    B.SetInsertPoint(ArgInst->getParent(), ++ArgInst->getIterator());
  } else {
    // Otherwise (e.g. for a constant) the beginning of the function is as
    // good a place as any.
    BasicBlock &EntryBB = B.GetInsertBlock()->getParent()->getEntryBlock();
    B.SetInsertPoint(&EntryBB, EntryBB.begin());
  }

  SinCos = B.CreateCall(Callee, Arg, "sincospi");

  // Struct results are unpacked with extractvalue, the x86_64 vector form
  // with extractelement.
  if (SinCos->getType()->isStructTy()) {
    Sin = B.CreateExtractValue(SinCos, 0, "sinpi");
    Cos = B.CreateExtractValue(SinCos, 1, "cospi");
  } else {
    Sin = B.CreateExtractElement(SinCos, ConstantInt::get(B.getInt32Ty(), 0),
                                 "sinpi");
    Cos = B.CreateExtractElement(SinCos, ConstantInt::get(B.getInt32Ty(), 1),
                                 "cospi");
  }
}
  1884.  
  1885.  Value *LibCallSimplifier::optimizeSinCosPi(CallInst *CI, IRBuilder<> &B) {
  1886.    // Make sure the prototype is as expected, otherwise the rest of the
  1887.    // function is probably invalid and likely to abort.
  1888.    if (!isTrigLibCall(CI))
  1889.      return nullptr;
  1890.  
  1891.    Value *Arg = CI->getArgOperand(0);
  1892.    SmallVector<CallInst *, 1> SinCalls;
  1893.    SmallVector<CallInst *, 1> CosCalls;
  1894.    SmallVector<CallInst *, 1> SinCosCalls;
  1895.  
  1896.    bool IsFloat = Arg->getType()->isFloatTy();
  1897.  
  1898.    // Look for all compatible sinpi, cospi and sincospi calls with the same
  1899.    // argument. If there are enough (in some sense) we can make the
  1900.    // substitution.
  1901.    Function *F = CI->getFunction();
  1902.    for (User *U : Arg->users())
  1903.      classifyArgUse(U, F, IsFloat, SinCalls, CosCalls, SinCosCalls);
  1904.  
  1905.    // It's only worthwhile if both sinpi and cospi are actually used.
  1906.    if (SinCosCalls.empty() && (SinCalls.empty() || CosCalls.empty()))
  1907.      return nullptr;
  1908.  
  1909.    Value *Sin, *Cos, *SinCos;
  1910.    insertSinCosCall(B, CI->getCalledFunction(), Arg, IsFloat, Sin, Cos, SinCos);
  1911.  
  1912.    auto replaceTrigInsts = [this](SmallVectorImpl<CallInst *> &Calls,
  1913.                                   Value *Res) {
  1914.      for (CallInst *C : Calls)
  1915.        replaceAllUsesWith(C, Res);
  1916.    };
  1917.  
  1918.    replaceTrigInsts(SinCalls, Sin);
  1919.    replaceTrigInsts(CosCalls, Cos);
  1920.    replaceTrigInsts(SinCosCalls, SinCos);
  1921.  
  1922.    return nullptr;
  1923.  }
  1924.  
  1925.  void LibCallSimplifier::classifyArgUse(
  1926.      Value *Val, Function *F, bool IsFloat,
  1927.      SmallVectorImpl<CallInst *> &SinCalls,
  1928.      SmallVectorImpl<CallInst *> &CosCalls,
  1929.      SmallVectorImpl<CallInst *> &SinCosCalls) {
  1930.    CallInst *CI = dyn_cast<CallInst>(Val);
  1931.  
  1932.    if (!CI)
  1933.      return;
  1934.  
  1935.    // Don't consider calls in other functions.
  1936.    if (CI->getFunction() != F)
  1937.      return;
  1938.  
  1939.    Function *Callee = CI->getCalledFunction();
  1940.    LibFunc Func;
  1941.    if (!Callee || !TLI->getLibFunc(*Callee, Func) || !TLI->has(Func) ||
  1942.        !isTrigLibCall(CI))
  1943.      return;
  1944.  
  1945.    if (IsFloat) {
  1946.      if (Func == LibFunc_sinpif)
  1947.        SinCalls.push_back(CI);
  1948.      else if (Func == LibFunc_cospif)
  1949.        CosCalls.push_back(CI);
  1950.      else if (Func == LibFunc_sincospif_stret)
  1951.        SinCosCalls.push_back(CI);
  1952.    } else {
  1953.      if (Func == LibFunc_sinpi)
  1954.        SinCalls.push_back(CI);
  1955.      else if (Func == LibFunc_cospi)
  1956.        CosCalls.push_back(CI);
  1957.      else if (Func == LibFunc_sincospi_stret)
  1958.        SinCosCalls.push_back(CI);
  1959.    }
  1960.  }
  1961.  
  1962.  //===----------------------------------------------------------------------===//
  1963.  // Integer Library Call Optimizations
  1964.  //===----------------------------------------------------------------------===//
  1965.  
  1966.  Value *LibCallSimplifier::optimizeFFS(CallInst *CI, IRBuilder<> &B) {
  1967.    // ffs(x) -> x != 0 ? (i32)llvm.cttz(x)+1 : 0
  1968.    Value *Op = CI->getArgOperand(0);
  1969.    Type *ArgType = Op->getType();
  1970.    Function *F = Intrinsic::getDeclaration(CI->getCalledFunction()->getParent(),
  1971.                                            Intrinsic::cttz, ArgType);
  1972.    Value *V = B.CreateCall(F, {Op, B.getTrue()}, "cttz");
  1973.    V = B.CreateAdd(V, ConstantInt::get(V->getType(), 1));
  1974.    V = B.CreateIntCast(V, B.getInt32Ty(), false);
  1975.  
  1976.    Value *Cond = B.CreateICmpNE(Op, Constant::getNullValue(ArgType));
  1977.    return B.CreateSelect(Cond, V, B.getInt32(0));
  1978.  }
  1979.  
  1980.  Value *LibCallSimplifier::optimizeFls(CallInst *CI, IRBuilder<> &B) {
  1981.    // fls(x) -> (i32)(sizeInBits(x) - llvm.ctlz(x, false))
  1982.    Value *Op = CI->getArgOperand(0);
  1983.    Type *ArgType = Op->getType();
  1984.    Function *F = Intrinsic::getDeclaration(CI->getCalledFunction()->getParent(),
  1985.                                            Intrinsic::ctlz, ArgType);
  1986.    Value *V = B.CreateCall(F, {Op, B.getFalse()}, "ctlz");
  1987.    V = B.CreateSub(ConstantInt::get(V->getType(), ArgType->getIntegerBitWidth()),
  1988.                    V);
  1989.    return B.CreateIntCast(V, CI->getType(), false);
  1990.  }
  1991.  
  1992.  Value *LibCallSimplifier::optimizeAbs(CallInst *CI, IRBuilder<> &B) {
  1993.    // abs(x) -> x <s 0 ? -x : x
  1994.    // The negation has 'nsw' because abs of INT_MIN is undefined.
  1995.    Value *X = CI->getArgOperand(0);
  1996.    Value *IsNeg = B.CreateICmpSLT(X, Constant::getNullValue(X->getType()));
  1997.    Value *NegX = B.CreateNSWNeg(X, "neg");
  1998.    return B.CreateSelect(IsNeg, NegX, X);
  1999.  }
  2000.  
  2001.  Value *LibCallSimplifier::optimizeIsDigit(CallInst *CI, IRBuilder<> &B) {
  2002.    // isdigit(c) -> (c-'0') <u 10
  2003.    Value *Op = CI->getArgOperand(0);
  2004.    Op = B.CreateSub(Op, B.getInt32('0'), "isdigittmp");
  2005.    Op = B.CreateICmpULT(Op, B.getInt32(10), "isdigit");
  2006.    return B.CreateZExt(Op, CI->getType());
  2007.  }
  2008.  
  2009.  Value *LibCallSimplifier::optimizeIsAscii(CallInst *CI, IRBuilder<> &B) {
  2010.    // isascii(c) -> c <u 128
  2011.    Value *Op = CI->getArgOperand(0);
  2012.    Op = B.CreateICmpULT(Op, B.getInt32(128), "isascii");
  2013.    return B.CreateZExt(Op, CI->getType());
  2014.  }
  2015.  
  2016.  Value *LibCallSimplifier::optimizeToAscii(CallInst *CI, IRBuilder<> &B) {
  2017.    // toascii(c) -> c & 0x7f
  2018.    return B.CreateAnd(CI->getArgOperand(0),
  2019.                       ConstantInt::get(CI->getType(), 0x7F));
  2020.  }
  2021.  
  2022.  Value *LibCallSimplifier::optimizeAtoi(CallInst *CI, IRBuilder<> &B) {
  2023.    StringRef Str;
  2024.    if (!getConstantStringInfo(CI->getArgOperand(0), Str))
  2025.      return nullptr;
  2026.  
  2027.    return convertStrToNumber(CI, Str, 10);
  2028.  }
  2029.  
  2030.  Value *LibCallSimplifier::optimizeStrtol(CallInst *CI, IRBuilder<> &B) {
  2031.    StringRef Str;
  2032.    if (!getConstantStringInfo(CI->getArgOperand(0), Str))
  2033.      return nullptr;
  2034.  
  2035.    if (!isa<ConstantPointerNull>(CI->getArgOperand(1)))
  2036.      return nullptr;
  2037.  
  2038.    if (ConstantInt *CInt = dyn_cast<ConstantInt>(CI->getArgOperand(2))) {
  2039.      return convertStrToNumber(CI, Str, CInt->getSExtValue());
  2040.    }
  2041.  
  2042.    return nullptr;
  2043.  }
  2044.  
  2045.  //===----------------------------------------------------------------------===//
  2046.  // Formatting and IO Library Call Optimizations
  2047.  //===----------------------------------------------------------------------===//
  2048.  
  2049.  static bool isReportingError(Function *Callee, CallInst *CI, int StreamArg);
  2050.  
  2051.  Value *LibCallSimplifier::optimizeErrorReporting(CallInst *CI, IRBuilder<> &B,
  2052.                                                   int StreamArg) {
  2053.    Function *Callee = CI->getCalledFunction();
  2054.    // Error reporting calls should be cold, mark them as such.
  2055.    // This applies even to non-builtin calls: it is only a hint and applies to
  2056.    // functions that the frontend might not understand as builtins.
  2057.  
  2058.    // This heuristic was suggested in:
  2059.    // Improving Static Branch Prediction in a Compiler
  2060.    // Brian L. Deitrich, Ben-Chung Cheng, Wen-mei W. Hwu
  2061.    // Proceedings of PACT'98, Oct. 1998, IEEE
  2062.    if (!CI->hasFnAttr(Attribute::Cold) &&
  2063.        isReportingError(Callee, CI, StreamArg)) {
  2064.      CI->addAttribute(AttributeList::FunctionIndex, Attribute::Cold);
  2065.    }
  2066.  
  2067.    return nullptr;
  2068.  }
  2069.  
  2070.  static bool isReportingError(Function *Callee, CallInst *CI, int StreamArg) {
  2071.    if (!Callee || !Callee->isDeclaration())
  2072.      return false;
  2073.  
  2074.    if (StreamArg < 0)
  2075.      return true;
  2076.  
  2077.    // These functions might be considered cold, but only if their stream
  2078.    // argument is stderr.
  2079.  
  2080.    if (StreamArg >= (int)CI->getNumArgOperands())
  2081.      return false;
  2082.    LoadInst *LI = dyn_cast<LoadInst>(CI->getArgOperand(StreamArg));
  2083.    if (!LI)
  2084.      return false;
  2085.    GlobalVariable *GV = dyn_cast<GlobalVariable>(LI->getPointerOperand());
  2086.    if (!GV || !GV->isDeclaration())
  2087.      return false;
  2088.    return GV->getName() == "stderr";
  2089.  }
  2090.  
/// Simplify printf() calls whose format string is a compile-time constant.
///
/// Handles: empty format (noop), single-char / "%%" formats (putchar),
/// "%s" with a one-char constant argument (putchar), pure-text formats
/// ending in '\n' (puts), "%c" (putchar), and "%s\n" (puts). Most rewrites
/// require the printf return value to be unused, since putchar/puts return
/// different values.
Value *LibCallSimplifier::optimizePrintFString(CallInst *CI, IRBuilder<> &B) {
  // Check for a fixed format string.
  StringRef FormatStr;
  if (!getConstantStringInfo(CI->getArgOperand(0), FormatStr))
    return nullptr;

  // Empty format string -> noop.
  if (FormatStr.empty()) // Tolerate printf's declared void.
    return CI->use_empty() ? (Value *)CI : ConstantInt::get(CI->getType(), 0);

  // Do not do any of the following transformations if the printf return value
  // is used, in general the printf return value is not compatible with either
  // putchar() or puts().
  if (!CI->use_empty())
    return nullptr;

  // printf("x") -> putchar('x'), even for "%" and "%%".
  if (FormatStr.size() == 1 || FormatStr == "%%")
    return emitPutChar(B.getInt32(FormatStr[0]), B, TLI);

  // printf("%s", "a") --> putchar('a')
  if (FormatStr == "%s" && CI->getNumArgOperands() > 1) {
    StringRef ChrStr;
    if (!getConstantStringInfo(CI->getOperand(1), ChrStr))
      return nullptr;
    if (ChrStr.size() != 1)
      return nullptr;
    return emitPutChar(B.getInt32(ChrStr[0]), B, TLI);
  }

  // printf("foo\n") --> puts("foo")
  if (FormatStr[FormatStr.size() - 1] == '\n' &&
      FormatStr.find('%') == StringRef::npos) { // No format characters.
    // Create a string literal with no \n on it.  We expect the constant merge
    // pass to be run after this pass, to merge duplicate strings.
    FormatStr = FormatStr.drop_back();
    Value *GV = B.CreateGlobalString(FormatStr, "str");
    return emitPutS(GV, B, TLI);
  }

  // Optimize specific format strings.
  // printf("%c", chr) --> putchar(chr)
  if (FormatStr == "%c" && CI->getNumArgOperands() > 1 &&
      CI->getArgOperand(1)->getType()->isIntegerTy())
    return emitPutChar(CI->getArgOperand(1), B, TLI);

  // printf("%s\n", str) --> puts(str)
  if (FormatStr == "%s\n" && CI->getNumArgOperands() > 1 &&
      CI->getArgOperand(1)->getType()->isPointerTy())
    return emitPutS(CI->getArgOperand(1), B, TLI);
  return nullptr;
}
  2143.  
  2144.  Value *LibCallSimplifier::optimizePrintF(CallInst *CI, IRBuilder<> &B) {
  2145.  
  2146.    Function *Callee = CI->getCalledFunction();
  2147.    FunctionType *FT = Callee->getFunctionType();
  2148.    if (Value *V = optimizePrintFString(CI, B)) {
  2149.      return V;
  2150.    }
  2151.  
  2152.    // printf(format, ...) -> iprintf(format, ...) if no floating point
  2153.    // arguments.
  2154.    if (TLI->has(LibFunc_iprintf) && !callHasFloatingPointArgument(CI)) {
  2155.      Module *M = B.GetInsertBlock()->getParent()->getParent();
  2156.      FunctionCallee IPrintFFn =
  2157.          M->getOrInsertFunction("iprintf", FT, Callee->getAttributes());
  2158.      CallInst *New = cast<CallInst>(CI->clone());
  2159.      New->setCalledFunction(IPrintFFn);
  2160.      B.Insert(New);
  2161.      return New;
  2162.    }
  2163.  
  2164.    // printf(format, ...) -> __small_printf(format, ...) if no 128-bit floating point
  2165.    // arguments.
  2166.    if (TLI->has(LibFunc_small_printf) && !callHasFP128Argument(CI)) {
  2167.      Module *M = B.GetInsertBlock()->getParent()->getParent();
  2168.      auto SmallPrintFFn =
  2169.          M->getOrInsertFunction(TLI->getName(LibFunc_small_printf),
  2170.                                 FT, Callee->getAttributes());
  2171.      CallInst *New = cast<CallInst>(CI->clone());
  2172.      New->setCalledFunction(SmallPrintFFn);
  2173.      B.Insert(New);
  2174.      return New;
  2175.    }
  2176.  
  2177.    return nullptr;
  2178.  }
  2179.  
Value *LibCallSimplifier::optimizeSPrintFString(CallInst *CI, IRBuilder<> &B) {
  // Fold sprintf calls with a constant format string into direct stores or a
  // memcpy.  Returns the value sprintf would have returned (the number of
  // bytes written, excluding the nul), or nullptr if not foldable.

  // Check for a fixed format string.
  StringRef FormatStr;
  if (!getConstantStringInfo(CI->getArgOperand(1), FormatStr))
    return nullptr;

  // If we just have a format string (nothing else crazy) transform it.
  if (CI->getNumArgOperands() == 2) {
    // Make sure there's no % in the constant array.  We could try to handle
    // %% -> % in the future if we cared.
    if (FormatStr.find('%') != StringRef::npos)
      return nullptr; // we found a format specifier, bail out.

    // sprintf(str, fmt) -> llvm.memcpy(align 1 str, align 1 fmt, strlen(fmt)+1)
    B.CreateMemCpy(CI->getArgOperand(0), 1, CI->getArgOperand(1), 1,
                   ConstantInt::get(DL.getIntPtrType(CI->getContext()),
                                    FormatStr.size() + 1)); // Copy the null byte.
    return ConstantInt::get(CI->getType(), FormatStr.size());
  }

  // The remaining optimizations require the format string to be "%s" or "%c"
  // and have an extra operand.
  if (FormatStr.size() != 2 || FormatStr[0] != '%' ||
      CI->getNumArgOperands() < 3)
    return nullptr;

  // Decode the second character of the format string.
  if (FormatStr[1] == 'c') {
    // sprintf(dst, "%c", chr) --> *(i8*)dst = chr; *((i8*)dst+1) = 0
    if (!CI->getArgOperand(2)->getType()->isIntegerTy())
      return nullptr;
    Value *V = B.CreateTrunc(CI->getArgOperand(2), B.getInt8Ty(), "char");
    Value *Ptr = castToCStr(CI->getArgOperand(0), B);
    B.CreateStore(V, Ptr);
    Ptr = B.CreateGEP(B.getInt8Ty(), Ptr, B.getInt32(1), "nul");
    B.CreateStore(B.getInt8(0), Ptr);

    // Exactly one character was written.
    return ConstantInt::get(CI->getType(), 1);
  }

  if (FormatStr[1] == 's') {
    // sprintf(dest, "%s", str) -> llvm.memcpy(align 1 dest, align 1 str,
    // strlen(str)+1)
    if (!CI->getArgOperand(2)->getType()->isPointerTy())
      return nullptr;

    // emitStrLen can fail (returns null) if strlen is unavailable here.
    Value *Len = emitStrLen(CI->getArgOperand(2), B, DL, TLI);
    if (!Len)
      return nullptr;
    Value *IncLen =
        B.CreateAdd(Len, ConstantInt::get(Len->getType(), 1), "leninc");
    B.CreateMemCpy(CI->getArgOperand(0), 1, CI->getArgOperand(2), 1, IncLen);

    // The sprintf result is the unincremented number of bytes in the string.
    return B.CreateIntCast(Len, CI->getType(), false);
  }
  return nullptr;
}
  2238.  
  2239.  Value *LibCallSimplifier::optimizeSPrintF(CallInst *CI, IRBuilder<> &B) {
  2240.    Function *Callee = CI->getCalledFunction();
  2241.    FunctionType *FT = Callee->getFunctionType();
  2242.    if (Value *V = optimizeSPrintFString(CI, B)) {
  2243.      return V;
  2244.    }
  2245.  
  2246.    // sprintf(str, format, ...) -> siprintf(str, format, ...) if no floating
  2247.    // point arguments.
  2248.    if (TLI->has(LibFunc_siprintf) && !callHasFloatingPointArgument(CI)) {
  2249.      Module *M = B.GetInsertBlock()->getParent()->getParent();
  2250.      FunctionCallee SIPrintFFn =
  2251.          M->getOrInsertFunction("siprintf", FT, Callee->getAttributes());
  2252.      CallInst *New = cast<CallInst>(CI->clone());
  2253.      New->setCalledFunction(SIPrintFFn);
  2254.      B.Insert(New);
  2255.      return New;
  2256.    }
  2257.  
  2258.    // sprintf(str, format, ...) -> __small_sprintf(str, format, ...) if no 128-bit
  2259.    // floating point arguments.
  2260.    if (TLI->has(LibFunc_small_sprintf) && !callHasFP128Argument(CI)) {
  2261.      Module *M = B.GetInsertBlock()->getParent()->getParent();
  2262.      auto SmallSPrintFFn =
  2263.          M->getOrInsertFunction(TLI->getName(LibFunc_small_sprintf),
  2264.                                 FT, Callee->getAttributes());
  2265.      CallInst *New = cast<CallInst>(CI->clone());
  2266.      New->setCalledFunction(SmallSPrintFFn);
  2267.      B.Insert(New);
  2268.      return New;
  2269.    }
  2270.  
  2271.    return nullptr;
  2272.  }
  2273.  
Value *LibCallSimplifier::optimizeSnPrintFString(CallInst *CI, IRBuilder<> &B) {
  // Fold snprintf calls whose format string AND size argument are
  // compile-time constants.  Returns the value snprintf would return (the
  // number of characters it would have produced, excluding the nul), or
  // nullptr if the call cannot be folded.

  // Check for a fixed format string.
  StringRef FormatStr;
  if (!getConstantStringInfo(CI->getArgOperand(2), FormatStr))
    return nullptr;

  // Check for size
  ConstantInt *Size = dyn_cast<ConstantInt>(CI->getArgOperand(1));
  if (!Size)
    return nullptr;

  uint64_t N = Size->getZExtValue();

  // If we just have a format string (nothing else crazy) transform it.
  if (CI->getNumArgOperands() == 3) {
    // Make sure there's no % in the constant array.  We could try to handle
    // %% -> % in the future if we cared.
    if (FormatStr.find('%') != StringRef::npos)
      return nullptr; // we found a format specifier, bail out.

    // N == 0: nothing is written, but snprintf still reports the length it
    // would have produced.  A buffer too small for string + nul is not
    // folded (truncation semantics are not reproduced here).
    if (N == 0)
      return ConstantInt::get(CI->getType(), FormatStr.size());
    else if (N < FormatStr.size() + 1)
      return nullptr;

    // snprintf(dst, size, fmt) -> llvm.memcpy(align 1 dst, align 1 fmt,
    // strlen(fmt)+1)
    B.CreateMemCpy(
        CI->getArgOperand(0), 1, CI->getArgOperand(2), 1,
        ConstantInt::get(DL.getIntPtrType(CI->getContext()),
                         FormatStr.size() + 1)); // Copy the null byte.
    return ConstantInt::get(CI->getType(), FormatStr.size());
  }

  // The remaining optimizations require the format string to be "%s" or "%c"
  // and have an extra operand.
  if (FormatStr.size() == 2 && FormatStr[0] == '%' &&
      CI->getNumArgOperands() == 4) {

    // Decode the second character of the format string.
    if (FormatStr[1] == 'c') {
      // N == 1 would require writing only the nul; that case is left alone.
      if (N == 0)
        return ConstantInt::get(CI->getType(), 1);
      else if (N == 1)
        return nullptr;

      // snprintf(dst, size, "%c", chr) --> *(i8*)dst = chr; *((i8*)dst+1) = 0
      if (!CI->getArgOperand(3)->getType()->isIntegerTy())
        return nullptr;
      Value *V = B.CreateTrunc(CI->getArgOperand(3), B.getInt8Ty(), "char");
      Value *Ptr = castToCStr(CI->getArgOperand(0), B);
      B.CreateStore(V, Ptr);
      Ptr = B.CreateGEP(B.getInt8Ty(), Ptr, B.getInt32(1), "nul");
      B.CreateStore(B.getInt8(0), Ptr);

      return ConstantInt::get(CI->getType(), 1);
    }

    if (FormatStr[1] == 's') {
      // snprintf(dest, size, "%s", str) to llvm.memcpy(dest, str, len+1, 1)
      StringRef Str;
      if (!getConstantStringInfo(CI->getArgOperand(3), Str))
        return nullptr;

      // Same size policy as above: fold only when everything fits (or N==0).
      if (N == 0)
        return ConstantInt::get(CI->getType(), Str.size());
      else if (N < Str.size() + 1)
        return nullptr;

      B.CreateMemCpy(CI->getArgOperand(0), 1, CI->getArgOperand(3), 1,
                     ConstantInt::get(CI->getType(), Str.size() + 1));

      // The snprintf result is the unincremented number of bytes in the string.
      return ConstantInt::get(CI->getType(), Str.size());
    }
  }
  return nullptr;
}
  2352.  
  2353.  Value *LibCallSimplifier::optimizeSnPrintF(CallInst *CI, IRBuilder<> &B) {
  2354.    if (Value *V = optimizeSnPrintFString(CI, B)) {
  2355.      return V;
  2356.    }
  2357.  
  2358.    return nullptr;
  2359.  }
  2360.  
Value *LibCallSimplifier::optimizeFPrintFString(CallInst *CI, IRBuilder<> &B) {
  // Fold fprintf calls with a constant format string into fwrite / fputc /
  // fputs.  Argument 0 is the stream; feed it to the error-reporting
  // heuristic first.
  optimizeErrorReporting(CI, B, 0);

  // All the optimizations depend on the format string.
  StringRef FormatStr;
  if (!getConstantStringInfo(CI->getArgOperand(1), FormatStr))
    return nullptr;

  // Do not do any of the following transformations if the fprintf return
  // value is used, in general the fprintf return value is not compatible
  // with fwrite(), fputc() or fputs().
  if (!CI->use_empty())
    return nullptr;

  // fprintf(F, "foo") --> fwrite("foo", 3, 1, F)
  if (CI->getNumArgOperands() == 2) {
    // Could handle %% -> % if we cared.
    if (FormatStr.find('%') != StringRef::npos)
      return nullptr; // We found a format specifier.

    return emitFWrite(
        CI->getArgOperand(1),
        ConstantInt::get(DL.getIntPtrType(CI->getContext()), FormatStr.size()),
        CI->getArgOperand(0), B, DL, TLI);
  }

  // The remaining optimizations require the format string to be "%s" or "%c"
  // and have an extra operand.
  if (FormatStr.size() != 2 || FormatStr[0] != '%' ||
      CI->getNumArgOperands() < 3)
    return nullptr;

  // Decode the second character of the format string.
  if (FormatStr[1] == 'c') {
    // fprintf(F, "%c", chr) --> fputc(chr, F)
    if (!CI->getArgOperand(2)->getType()->isIntegerTy())
      return nullptr;
    return emitFPutC(CI->getArgOperand(2), CI->getArgOperand(0), B, TLI);
  }

  if (FormatStr[1] == 's') {
    // fprintf(F, "%s", str) --> fputs(str, F)
    if (!CI->getArgOperand(2)->getType()->isPointerTy())
      return nullptr;
    return emitFPutS(CI->getArgOperand(2), CI->getArgOperand(0), B, TLI);
  }
  return nullptr;
}
  2409.  
  2410.  Value *LibCallSimplifier::optimizeFPrintF(CallInst *CI, IRBuilder<> &B) {
  2411.    Function *Callee = CI->getCalledFunction();
  2412.    FunctionType *FT = Callee->getFunctionType();
  2413.    if (Value *V = optimizeFPrintFString(CI, B)) {
  2414.      return V;
  2415.    }
  2416.  
  2417.    // fprintf(stream, format, ...) -> fiprintf(stream, format, ...) if no
  2418.    // floating point arguments.
  2419.    if (TLI->has(LibFunc_fiprintf) && !callHasFloatingPointArgument(CI)) {
  2420.      Module *M = B.GetInsertBlock()->getParent()->getParent();
  2421.      FunctionCallee FIPrintFFn =
  2422.          M->getOrInsertFunction("fiprintf", FT, Callee->getAttributes());
  2423.      CallInst *New = cast<CallInst>(CI->clone());
  2424.      New->setCalledFunction(FIPrintFFn);
  2425.      B.Insert(New);
  2426.      return New;
  2427.    }
  2428.  
  2429.    // fprintf(stream, format, ...) -> __small_fprintf(stream, format, ...) if no
  2430.    // 128-bit floating point arguments.
  2431.    if (TLI->has(LibFunc_small_fprintf) && !callHasFP128Argument(CI)) {
  2432.      Module *M = B.GetInsertBlock()->getParent()->getParent();
  2433.      auto SmallFPrintFFn =
  2434.          M->getOrInsertFunction(TLI->getName(LibFunc_small_fprintf),
  2435.                                 FT, Callee->getAttributes());
  2436.      CallInst *New = cast<CallInst>(CI->clone());
  2437.      New->setCalledFunction(SmallFPrintFFn);
  2438.      B.Insert(New);
  2439.      return New;
  2440.    }
  2441.  
  2442.    return nullptr;
  2443.  }
  2444.  
  2445.  Value *LibCallSimplifier::optimizeFWrite(CallInst *CI, IRBuilder<> &B) {
  2446.    optimizeErrorReporting(CI, B, 3);
  2447.  
  2448.    // Get the element size and count.
  2449.    ConstantInt *SizeC = dyn_cast<ConstantInt>(CI->getArgOperand(1));
  2450.    ConstantInt *CountC = dyn_cast<ConstantInt>(CI->getArgOperand(2));
  2451.    if (SizeC && CountC) {
  2452.      uint64_t Bytes = SizeC->getZExtValue() * CountC->getZExtValue();
  2453.  
  2454.      // If this is writing zero records, remove the call (it's a noop).
  2455.      if (Bytes == 0)
  2456.        return ConstantInt::get(CI->getType(), 0);
  2457.  
  2458.      // If this is writing one byte, turn it into fputc.
  2459.      // This optimisation is only valid, if the return value is unused.
  2460.      if (Bytes == 1 && CI->use_empty()) { // fwrite(S,1,1,F) -> fputc(S[0],F)
  2461.        Value *Char = B.CreateLoad(B.getInt8Ty(),
  2462.                                   castToCStr(CI->getArgOperand(0), B), "char");
  2463.        Value *NewCI = emitFPutC(Char, CI->getArgOperand(3), B, TLI);
  2464.        return NewCI ? ConstantInt::get(CI->getType(), 1) : nullptr;
  2465.      }
  2466.    }
  2467.  
  2468.    if (isLocallyOpenedFile(CI->getArgOperand(3), CI, B, TLI))
  2469.      return emitFWriteUnlocked(CI->getArgOperand(0), CI->getArgOperand(1),
  2470.                                CI->getArgOperand(2), CI->getArgOperand(3), B, DL,
  2471.                                TLI);
  2472.  
  2473.    return nullptr;
  2474.  }
  2475.  
Value *LibCallSimplifier::optimizeFPuts(CallInst *CI, IRBuilder<> &B) {
  // fputs(s, F): argument 1 is the stream, feed it to the error-reporting
  // heuristic first.
  optimizeErrorReporting(CI, B, 1);

  // Don't rewrite fputs to fwrite when optimising for size because fwrite
  // requires more arguments and thus extra MOVs are required.
  bool OptForSize = CI->getFunction()->hasOptSize() ||
                    llvm::shouldOptimizeForSize(CI->getParent(), PSI, BFI);
  if (OptForSize)
    return nullptr;

  // Check if has any use
  if (!CI->use_empty()) {
    if (isLocallyOpenedFile(CI->getArgOperand(1), CI, B, TLI))
      // The unlocked variant is substituted even when the result is used;
      // only the fwrite rewrite below is incompatible with a consumed result.
      return emitFPutSUnlocked(CI->getArgOperand(0), CI->getArgOperand(1), B,
                               TLI);
    else
      // We can't optimize if return value is used.
      return nullptr;
  }

  // fputs(s,F) --> fwrite(s,strlen(s),1,F)
  // GetStringLength returns the length including the nul terminator (0 if
  // unknown), hence the Len - 1 byte count below.
  uint64_t Len = GetStringLength(CI->getArgOperand(0));
  if (!Len)
    return nullptr;

  // Known to have no uses (see above).
  return emitFWrite(
      CI->getArgOperand(0),
      ConstantInt::get(DL.getIntPtrType(CI->getContext()), Len - 1),
      CI->getArgOperand(1), B, DL, TLI);
}
  2507.  
  2508.  Value *LibCallSimplifier::optimizeFPutc(CallInst *CI, IRBuilder<> &B) {
  2509.    optimizeErrorReporting(CI, B, 1);
  2510.  
  2511.    if (isLocallyOpenedFile(CI->getArgOperand(1), CI, B, TLI))
  2512.      return emitFPutCUnlocked(CI->getArgOperand(0), CI->getArgOperand(1), B,
  2513.                               TLI);
  2514.  
  2515.    return nullptr;
  2516.  }
  2517.  
  2518.  Value *LibCallSimplifier::optimizeFGetc(CallInst *CI, IRBuilder<> &B) {
  2519.    if (isLocallyOpenedFile(CI->getArgOperand(0), CI, B, TLI))
  2520.      return emitFGetCUnlocked(CI->getArgOperand(0), B, TLI);
  2521.  
  2522.    return nullptr;
  2523.  }
  2524.  
  2525.  Value *LibCallSimplifier::optimizeFGets(CallInst *CI, IRBuilder<> &B) {
  2526.    if (isLocallyOpenedFile(CI->getArgOperand(2), CI, B, TLI))
  2527.      return emitFGetSUnlocked(CI->getArgOperand(0), CI->getArgOperand(1),
  2528.                               CI->getArgOperand(2), B, TLI);
  2529.  
  2530.    return nullptr;
  2531.  }
  2532.  
  2533.  Value *LibCallSimplifier::optimizeFRead(CallInst *CI, IRBuilder<> &B) {
  2534.    if (isLocallyOpenedFile(CI->getArgOperand(3), CI, B, TLI))
  2535.      return emitFReadUnlocked(CI->getArgOperand(0), CI->getArgOperand(1),
  2536.                               CI->getArgOperand(2), CI->getArgOperand(3), B, DL,
  2537.                               TLI);
  2538.  
  2539.    return nullptr;
  2540.  }
  2541.  
  2542.  Value *LibCallSimplifier::optimizePuts(CallInst *CI, IRBuilder<> &B) {
  2543.    if (!CI->use_empty())
  2544.      return nullptr;
  2545.  
  2546.    // Check for a constant string.
  2547.    // puts("") -> putchar('\n')
  2548.    StringRef Str;
  2549.    if (getConstantStringInfo(CI->getArgOperand(0), Str) && Str.empty())
  2550.      return emitPutChar(B.getInt32('\n'), B, TLI);
  2551.  
  2552.    return nullptr;
  2553.  }
  2554.  
  2555.  bool LibCallSimplifier::hasFloatVersion(StringRef FuncName) {
  2556.    LibFunc Func;
  2557.    SmallString<20> FloatFuncName = FuncName;
  2558.    FloatFuncName += 'f';
  2559.    if (TLI->getLibFunc(FloatFuncName, Func))
  2560.      return TLI->has(Func);
  2561.    return false;
  2562.  }
  2563.  
Value *LibCallSimplifier::optimizeStringMemoryLibCall(CallInst *CI,
                                                      IRBuilder<> &Builder) {
  // Dispatch a recognized string/memory library call to its dedicated
  // optimizer.  Returns the simplified value or nullptr if nothing applied.
  LibFunc Func;
  Function *Callee = CI->getCalledFunction();
  // Check for string/memory library functions.
  if (TLI->getLibFunc(*Callee, Func) && TLI->has(Func)) {
    // Make sure we never change the calling convention.
    assert((ignoreCallingConv(Func) ||
            isCallingConvCCompatible(CI)) &&
      "Optimizing string/memory libcall would change the calling convention");
    switch (Func) {
    case LibFunc_strcat:
      return optimizeStrCat(CI, Builder);
    case LibFunc_strncat:
      return optimizeStrNCat(CI, Builder);
    case LibFunc_strchr:
      return optimizeStrChr(CI, Builder);
    case LibFunc_strrchr:
      return optimizeStrRChr(CI, Builder);
    case LibFunc_strcmp:
      return optimizeStrCmp(CI, Builder);
    case LibFunc_strncmp:
      return optimizeStrNCmp(CI, Builder);
    case LibFunc_strcpy:
      return optimizeStrCpy(CI, Builder);
    case LibFunc_stpcpy:
      return optimizeStpCpy(CI, Builder);
    case LibFunc_strncpy:
      return optimizeStrNCpy(CI, Builder);
    case LibFunc_strlen:
      return optimizeStrLen(CI, Builder);
    case LibFunc_strpbrk:
      return optimizeStrPBrk(CI, Builder);
    // All strto* variants share one handler.
    case LibFunc_strtol:
    case LibFunc_strtod:
    case LibFunc_strtof:
    case LibFunc_strtoul:
    case LibFunc_strtoll:
    case LibFunc_strtold:
    case LibFunc_strtoull:
      return optimizeStrTo(CI, Builder);
    case LibFunc_strspn:
      return optimizeStrSpn(CI, Builder);
    case LibFunc_strcspn:
      return optimizeStrCSpn(CI, Builder);
    case LibFunc_strstr:
      return optimizeStrStr(CI, Builder);
    case LibFunc_memchr:
      return optimizeMemChr(CI, Builder);
    case LibFunc_bcmp:
      return optimizeBCmp(CI, Builder);
    case LibFunc_memcmp:
      return optimizeMemCmp(CI, Builder);
    case LibFunc_memcpy:
      return optimizeMemCpy(CI, Builder);
    case LibFunc_memmove:
      return optimizeMemMove(CI, Builder);
    case LibFunc_memset:
      return optimizeMemSet(CI, Builder);
    case LibFunc_realloc:
      return optimizeRealloc(CI, Builder);
    case LibFunc_wcslen:
      return optimizeWcslen(CI, Builder);
    default:
      break;
    }
  }
  return nullptr;
}
  2633.  
Value *LibCallSimplifier::optimizeFloatingPointLibCall(CallInst *CI,
                                                       LibFunc Func,
                                                       IRBuilder<> &Builder) {
  // Dispatch a recognized floating-point library call to its dedicated
  // optimizer.  Returns the simplified value or nullptr.
  // Don't optimize calls that require strict floating point semantics.
  if (CI->isStrictFP())
    return nullptr;

  // Reflection identities (e.g. around trig symmetry) apply across many of
  // the functions below, so try them before the per-function dispatch.
  if (Value *V = optimizeTrigReflections(CI, Func, Builder))
    return V;

  switch (Func) {
  case LibFunc_sinpif:
  case LibFunc_sinpi:
  case LibFunc_cospif:
  case LibFunc_cospi:
    return optimizeSinCosPi(CI, Builder);
  case LibFunc_powf:
  case LibFunc_pow:
  case LibFunc_powl:
    return optimizePow(CI, Builder);
  case LibFunc_exp2l:
  case LibFunc_exp2:
  case LibFunc_exp2f:
    return optimizeExp2(CI, Builder);
  case LibFunc_fabsf:
  case LibFunc_fabs:
  case LibFunc_fabsl:
    return replaceUnaryCall(CI, Builder, Intrinsic::fabs);
  case LibFunc_sqrtf:
  case LibFunc_sqrt:
  case LibFunc_sqrtl:
    return optimizeSqrt(CI, Builder);
  case LibFunc_log:
  case LibFunc_log10:
  case LibFunc_log1p:
  case LibFunc_log2:
  case LibFunc_logb:
    return optimizeLog(CI, Builder);
  case LibFunc_tan:
  case LibFunc_tanf:
  case LibFunc_tanl:
    return optimizeTan(CI, Builder);
  // The following rounding functions map directly onto intrinsics.
  case LibFunc_ceil:
    return replaceUnaryCall(CI, Builder, Intrinsic::ceil);
  case LibFunc_floor:
    return replaceUnaryCall(CI, Builder, Intrinsic::floor);
  case LibFunc_round:
    return replaceUnaryCall(CI, Builder, Intrinsic::round);
  case LibFunc_nearbyint:
    return replaceUnaryCall(CI, Builder, Intrinsic::nearbyint);
  case LibFunc_rint:
    return replaceUnaryCall(CI, Builder, Intrinsic::rint);
  case LibFunc_trunc:
    return replaceUnaryCall(CI, Builder, Intrinsic::trunc);
  // Unary double-precision functions that may be shrunk to their float
  // variants when unsafe FP shrinking is allowed and the variant exists.
  case LibFunc_acos:
  case LibFunc_acosh:
  case LibFunc_asin:
  case LibFunc_asinh:
  case LibFunc_atan:
  case LibFunc_atanh:
  case LibFunc_cbrt:
  case LibFunc_cosh:
  case LibFunc_exp:
  case LibFunc_exp10:
  case LibFunc_expm1:
  case LibFunc_cos:
  case LibFunc_sin:
  case LibFunc_sinh:
  case LibFunc_tanh:
    if (UnsafeFPShrink && hasFloatVersion(CI->getCalledFunction()->getName()))
      return optimizeUnaryDoubleFP(CI, Builder, true);
    return nullptr;
  case LibFunc_copysign:
    if (hasFloatVersion(CI->getCalledFunction()->getName()))
      return optimizeBinaryDoubleFP(CI, Builder);
    return nullptr;
  case LibFunc_fminf:
  case LibFunc_fmin:
  case LibFunc_fminl:
  case LibFunc_fmaxf:
  case LibFunc_fmax:
  case LibFunc_fmaxl:
    return optimizeFMinFMax(CI, Builder);
  case LibFunc_cabs:
  case LibFunc_cabsf:
  case LibFunc_cabsl:
    return optimizeCAbs(CI, Builder);
  default:
    return nullptr;
  }
}
  2725.  
Value *LibCallSimplifier::optimizeCall(CallInst *CI) {
  // Top-level entry point: attempt to simplify a call to an intrinsic, a
  // fortified libcall, or a recognized library function.  Returns the
  // replacement value or nullptr if the call was left alone.
  // TODO: Split out the code below that operates on FP calls so that
  //       we can allow non-FP calls with the StrictFP attribute to be
  //       optimized.
  if (CI->isNoBuiltin())
    return nullptr;

  LibFunc Func;
  Function *Callee = CI->getCalledFunction();

  // Carry the call's operand bundles over to anything the builder creates.
  SmallVector<OperandBundleDef, 2> OpBundles;
  CI->getOperandBundlesAsDefs(OpBundles);
  IRBuilder<> Builder(CI, /*FPMathTag=*/nullptr, OpBundles);
  bool isCallingConvC = isCallingConvCCompatible(CI);

  // Command-line parameter overrides instruction attribute.
  // This can't be moved to optimizeFloatingPointLibCall() because it may be
  // used by the intrinsic optimizations.
  if (EnableUnsafeFPShrink.getNumOccurrences() > 0)
    UnsafeFPShrink = EnableUnsafeFPShrink;
  else if (isa<FPMathOperator>(CI) && CI->isFast())
    UnsafeFPShrink = true;

  // First, check for intrinsics.
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI)) {
    if (!isCallingConvC)
      return nullptr;
    // The FP intrinsics have corresponding constrained versions so we don't
    // need to check for the StrictFP attribute here.
    switch (II->getIntrinsicID()) {
    case Intrinsic::pow:
      return optimizePow(CI, Builder);
    case Intrinsic::exp2:
      return optimizeExp2(CI, Builder);
    case Intrinsic::log:
      return optimizeLog(CI, Builder);
    case Intrinsic::sqrt:
      return optimizeSqrt(CI, Builder);
    // TODO: Use foldMallocMemset() with memset intrinsic.
    default:
      return nullptr;
    }
  }

  // Also try to simplify calls to fortified library functions.
  if (Value *SimplifiedFortifiedCI = FortifiedSimplifier.optimizeCall(CI)) {
    // Try to further simplify the result.
    CallInst *SimplifiedCI = dyn_cast<CallInst>(SimplifiedFortifiedCI);
    if (SimplifiedCI && SimplifiedCI->getCalledFunction()) {
      // Use an IR Builder from SimplifiedCI if available instead of CI
      // to guarantee we reach all uses we might replace later on.
      IRBuilder<> TmpBuilder(SimplifiedCI);
      if (Value *V = optimizeStringMemoryLibCall(SimplifiedCI, TmpBuilder)) {
        // If we were able to further simplify, remove the now redundant call.
        SimplifiedCI->replaceAllUsesWith(V);
        eraseFromParent(SimplifiedCI);
        return V;
      }
    }
    return SimplifiedFortifiedCI;
  }

  // Then check for known library functions.
  if (TLI->getLibFunc(*Callee, Func) && TLI->has(Func)) {
    // We never change the calling convention.
    if (!ignoreCallingConv(Func) && !isCallingConvC)
      return nullptr;
    if (Value *V = optimizeStringMemoryLibCall(CI, Builder))
      return V;
    if (Value *V = optimizeFloatingPointLibCall(CI, Func, Builder))
      return V;
    switch (Func) {
    case LibFunc_ffs:
    case LibFunc_ffsl:
    case LibFunc_ffsll:
      return optimizeFFS(CI, Builder);
    case LibFunc_fls:
    case LibFunc_flsl:
    case LibFunc_flsll:
      return optimizeFls(CI, Builder);
    case LibFunc_abs:
    case LibFunc_labs:
    case LibFunc_llabs:
      return optimizeAbs(CI, Builder);
    case LibFunc_isdigit:
      return optimizeIsDigit(CI, Builder);
    case LibFunc_isascii:
      return optimizeIsAscii(CI, Builder);
    case LibFunc_toascii:
      return optimizeToAscii(CI, Builder);
    case LibFunc_atoi:
    case LibFunc_atol:
    case LibFunc_atoll:
      return optimizeAtoi(CI, Builder);
    case LibFunc_strtol:
    case LibFunc_strtoll:
      return optimizeStrtol(CI, Builder);
    case LibFunc_printf:
      return optimizePrintF(CI, Builder);
    case LibFunc_sprintf:
      return optimizeSPrintF(CI, Builder);
    case LibFunc_snprintf:
      return optimizeSnPrintF(CI, Builder);
    case LibFunc_fprintf:
      return optimizeFPrintF(CI, Builder);
    case LibFunc_fwrite:
      return optimizeFWrite(CI, Builder);
    case LibFunc_fread:
      return optimizeFRead(CI, Builder);
    case LibFunc_fputs:
      return optimizeFPuts(CI, Builder);
    case LibFunc_fgets:
      return optimizeFGets(CI, Builder);
    case LibFunc_fputc:
      return optimizeFPutc(CI, Builder);
    case LibFunc_fgetc:
      return optimizeFGetc(CI, Builder);
    case LibFunc_puts:
      return optimizePuts(CI, Builder);
    case LibFunc_perror:
      return optimizeErrorReporting(CI, Builder);
    case LibFunc_vfprintf:
    case LibFunc_fiprintf:
      // Stream is argument 0 for these; only the error-reporting heuristic
      // applies.
      return optimizeErrorReporting(CI, Builder, 0);
    default:
      return nullptr;
    }
  }
  return nullptr;
}
  2856.  
// UnsafeFPShrink starts out disabled; optimizeCall() may turn it on from the
// command-line flag or the call's fast-math flags.  Replacer/Eraser let the
// host pass observe RAUW and instruction deletion (e.g. to update worklists).
LibCallSimplifier::LibCallSimplifier(
    const DataLayout &DL, const TargetLibraryInfo *TLI,
    OptimizationRemarkEmitter &ORE,
    BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
    function_ref<void(Instruction *, Value *)> Replacer,
    function_ref<void(Instruction *)> Eraser)
    : FortifiedSimplifier(TLI), DL(DL), TLI(TLI), ORE(ORE), BFI(BFI), PSI(PSI),
      UnsafeFPShrink(false), Replacer(Replacer), Eraser(Eraser) {}
  2865.  
  2866.  void LibCallSimplifier::replaceAllUsesWith(Instruction *I, Value *With) {
  2867.    // Indirect through the replacer used in this instance.
  2868.    Replacer(I, With);
  2869.  }
  2870.  
  2871.  void LibCallSimplifier::eraseFromParent(Instruction *I) {
  2872.    Eraser(I);
  2873.  }
  2874.  
  2875.  // TODO:
  2876.  //   Additional cases that we need to add to this file:
  2877.  //
  2878.  // cbrt:
  2879.  //   * cbrt(expN(X))  -> expN(x/3)
  2880.  //   * cbrt(sqrt(x))  -> pow(x,1/6)
  2881.  //   * cbrt(cbrt(x))  -> pow(x,1/9)
  2882.  //
  2883.  // exp, expf, expl:
  2884.  //   * exp(log(x))  -> x
  2885.  //
  2886.  // log, logf, logl:
  2887.  //   * log(exp(x))   -> x
  2888.  //   * log(exp(y))   -> y*log(e)
  2889.  //   * log(exp10(y)) -> y*log(10)
  2890.  //   * log(sqrt(x))  -> 0.5*log(x)
  2891.  //
  2892.  // pow, powf, powl:
  2893.  //   * pow(sqrt(x),y) -> pow(x,y*0.5)
  2894.  //   * pow(pow(x,y),z)-> pow(x,y*z)
  2895.  //
  2896.  // signbit:
  2897.  //   * signbit(cnst) -> cnst'
  2898.  //   * signbit(nncst) -> 0 (if pstv is a non-negative constant)
  2899.  //
  2900.  // sqrt, sqrtf, sqrtl:
  2901.  //   * sqrt(expN(x))  -> expN(x*0.5)
  2902.  //   * sqrt(Nroot(x)) -> pow(x,1/(2*N))
  2903.  //   * sqrt(pow(x,y)) -> pow(|x|,y*0.5)
  2904.  //
  2905.  
  2906.  //===----------------------------------------------------------------------===//
  2907.  // Fortified Library Call Optimizations
  2908.  //===----------------------------------------------------------------------===//
  2909.  
  2910.  bool
  2911.  FortifiedLibCallSimplifier::isFortifiedCallFoldable(CallInst *CI,
  2912.                                                      unsigned ObjSizeOp,
  2913.                                                      Optional<unsigned> SizeOp,
  2914.                                                      Optional<unsigned> StrOp,
  2915.                                                      Optional<unsigned> FlagOp) {
  2916.    // If this function takes a flag argument, the implementation may use it to
  2917.    // perform extra checks. Don't fold into the non-checking variant.
  2918.    if (FlagOp) {
  2919.      ConstantInt *Flag = dyn_cast<ConstantInt>(CI->getArgOperand(*FlagOp));
  2920.      if (!Flag || !Flag->isZero())
  2921.        return false;
  2922.    }
  2923.  
  2924.    if (SizeOp && CI->getArgOperand(ObjSizeOp) == CI->getArgOperand(*SizeOp))
  2925.      return true;
  2926.  
  2927.    if (ConstantInt *ObjSizeCI =
  2928.            dyn_cast<ConstantInt>(CI->getArgOperand(ObjSizeOp))) {
  2929.      if (ObjSizeCI->isMinusOne())
  2930.        return true;
  2931.      // If the object size wasn't -1 (unknown), bail out if we were asked to.
  2932.      if (OnlyLowerUnknownSize)
  2933.        return false;
  2934.      if (StrOp) {
  2935.        uint64_t Len = GetStringLength(CI->getArgOperand(*StrOp));
  2936.        // If the length is 0 we don't know how long it is and so we can't
  2937.        // remove the check.
  2938.        if (Len == 0)
  2939.          return false;
  2940.        return ObjSizeCI->getZExtValue() >= Len;
  2941.      }
  2942.  
  2943.      if (SizeOp) {
  2944.        if (ConstantInt *SizeCI =
  2945.                dyn_cast<ConstantInt>(CI->getArgOperand(*SizeOp)))
  2946.          return ObjSizeCI->getZExtValue() >= SizeCI->getZExtValue();
  2947.      }
  2948.    }
  2949.    return false;
  2950.  }
  2951.  
  2952.  Value *FortifiedLibCallSimplifier::optimizeMemCpyChk(CallInst *CI,
  2953.                                                       IRBuilder<> &B) {
  2954.    if (isFortifiedCallFoldable(CI, 3, 2)) {
  2955.      B.CreateMemCpy(CI->getArgOperand(0), 1, CI->getArgOperand(1), 1,
  2956.                     CI->getArgOperand(2));
  2957.      return CI->getArgOperand(0);
  2958.    }
  2959.    return nullptr;
  2960.  }
  2961.  
  2962.  Value *FortifiedLibCallSimplifier::optimizeMemMoveChk(CallInst *CI,
  2963.                                                        IRBuilder<> &B) {
  2964.    if (isFortifiedCallFoldable(CI, 3, 2)) {
  2965.      B.CreateMemMove(CI->getArgOperand(0), 1, CI->getArgOperand(1), 1,
  2966.                      CI->getArgOperand(2));
  2967.      return CI->getArgOperand(0);
  2968.    }
  2969.    return nullptr;
  2970.  }
  2971.  
  2972.  Value *FortifiedLibCallSimplifier::optimizeMemSetChk(CallInst *CI,
  2973.                                                       IRBuilder<> &B) {
  2974.    // TODO: Try foldMallocMemset() here.
  2975.  
  2976.    if (isFortifiedCallFoldable(CI, 3, 2)) {
  2977.      Value *Val = B.CreateIntCast(CI->getArgOperand(1), B.getInt8Ty(), false);
  2978.      B.CreateMemSet(CI->getArgOperand(0), Val, CI->getArgOperand(2), 1);
  2979.      return CI->getArgOperand(0);
  2980.    }
  2981.    return nullptr;
  2982.  }
  2983.  
  2984.  Value *FortifiedLibCallSimplifier::optimizeStrpCpyChk(CallInst *CI,
  2985.                                                        IRBuilder<> &B,
  2986.                                                        LibFunc Func) {
  2987.    const DataLayout &DL = CI->getModule()->getDataLayout();
  2988.    Value *Dst = CI->getArgOperand(0), *Src = CI->getArgOperand(1),
  2989.          *ObjSize = CI->getArgOperand(2);
  2990.  
  2991.    // __stpcpy_chk(x,x,...)  -> x+strlen(x)
  2992.    if (Func == LibFunc_stpcpy_chk && !OnlyLowerUnknownSize && Dst == Src) {
  2993.      Value *StrLen = emitStrLen(Src, B, DL, TLI);
  2994.      return StrLen ? B.CreateInBoundsGEP(B.getInt8Ty(), Dst, StrLen) : nullptr;
  2995.    }
  2996.  
  2997.    // If a) we don't have any length information, or b) we know this will
  2998.    // fit then just lower to a plain st[rp]cpy. Otherwise we'll keep our
  2999.    // st[rp]cpy_chk call which may fail at runtime if the size is too long.
  3000.    // TODO: It might be nice to get a maximum length out of the possible
  3001.    // string lengths for varying.
  3002.    if (isFortifiedCallFoldable(CI, 2, None, 1)) {
  3003.      if (Func == LibFunc_strcpy_chk)
  3004.        return emitStrCpy(Dst, Src, B, TLI);
  3005.      else
  3006.        return emitStpCpy(Dst, Src, B, TLI);
  3007.    }
  3008.  
  3009.    if (OnlyLowerUnknownSize)
  3010.      return nullptr;
  3011.  
  3012.    // Maybe we can still fold __st[rp]cpy_chk to __memcpy_chk.
  3013.    uint64_t Len = GetStringLength(Src);
  3014.    if (Len == 0)
  3015.      return nullptr;
  3016.  
  3017.    Type *SizeTTy = DL.getIntPtrType(CI->getContext());
  3018.    Value *LenV = ConstantInt::get(SizeTTy, Len);
  3019.    Value *Ret = emitMemCpyChk(Dst, Src, LenV, ObjSize, B, DL, TLI);
  3020.    // If the function was an __stpcpy_chk, and we were able to fold it into
  3021.    // a __memcpy_chk, we still need to return the correct end pointer.
  3022.    if (Ret && Func == LibFunc_stpcpy_chk)
  3023.      return B.CreateGEP(B.getInt8Ty(), Dst, ConstantInt::get(SizeTTy, Len - 1));
  3024.    return Ret;
  3025.  }
  3026.  
  3027.  Value *FortifiedLibCallSimplifier::optimizeStrpNCpyChk(CallInst *CI,
  3028.                                                         IRBuilder<> &B,
  3029.                                                         LibFunc Func) {
  3030.    if (isFortifiedCallFoldable(CI, 3, 2)) {
  3031.      if (Func == LibFunc_strncpy_chk)
  3032.        return emitStrNCpy(CI->getArgOperand(0), CI->getArgOperand(1),
  3033.                                 CI->getArgOperand(2), B, TLI);
  3034.      else
  3035.        return emitStpNCpy(CI->getArgOperand(0), CI->getArgOperand(1),
  3036.                           CI->getArgOperand(2), B, TLI);
  3037.    }
  3038.  
  3039.    return nullptr;
  3040.  }
  3041.  
  3042.  Value *FortifiedLibCallSimplifier::optimizeMemCCpyChk(CallInst *CI,
  3043.                                                        IRBuilder<> &B) {
  3044.    if (isFortifiedCallFoldable(CI, 4, 3))
  3045.      return emitMemCCpy(CI->getArgOperand(0), CI->getArgOperand(1),
  3046.                         CI->getArgOperand(2), CI->getArgOperand(3), B, TLI);
  3047.  
  3048.    return nullptr;
  3049.  }
  3050.  
  3051.  Value *FortifiedLibCallSimplifier::optimizeSNPrintfChk(CallInst *CI,
  3052.                                                         IRBuilder<> &B) {
  3053.    if (isFortifiedCallFoldable(CI, 3, 1, None, 2)) {
  3054.      SmallVector<Value *, 8> VariadicArgs(CI->arg_begin() + 5, CI->arg_end());
  3055.      return emitSNPrintf(CI->getArgOperand(0), CI->getArgOperand(1),
  3056.                          CI->getArgOperand(4), VariadicArgs, B, TLI);
  3057.    }
  3058.  
  3059.    return nullptr;
  3060.  }
  3061.  
  3062.  Value *FortifiedLibCallSimplifier::optimizeSPrintfChk(CallInst *CI,
  3063.                                                        IRBuilder<> &B) {
  3064.    if (isFortifiedCallFoldable(CI, 2, None, None, 1)) {
  3065.      SmallVector<Value *, 8> VariadicArgs(CI->arg_begin() + 4, CI->arg_end());
  3066.      return emitSPrintf(CI->getArgOperand(0), CI->getArgOperand(3), VariadicArgs,
  3067.                         B, TLI);
  3068.    }
  3069.  
  3070.    return nullptr;
  3071.  }
  3072.  
  3073.  Value *FortifiedLibCallSimplifier::optimizeStrCatChk(CallInst *CI,
  3074.                                                       IRBuilder<> &B) {
  3075.    if (isFortifiedCallFoldable(CI, 2))
  3076.      return emitStrCat(CI->getArgOperand(0), CI->getArgOperand(1), B, TLI);
  3077.  
  3078.    return nullptr;
  3079.  }
  3080.  
  3081.  Value *FortifiedLibCallSimplifier::optimizeStrLCat(CallInst *CI,
  3082.                                                     IRBuilder<> &B) {
  3083.    if (isFortifiedCallFoldable(CI, 3))
  3084.      return emitStrLCat(CI->getArgOperand(0), CI->getArgOperand(1),
  3085.                         CI->getArgOperand(2), B, TLI);
  3086.  
  3087.    return nullptr;
  3088.  }
  3089.  
  3090.  Value *FortifiedLibCallSimplifier::optimizeStrNCatChk(CallInst *CI,
  3091.                                                        IRBuilder<> &B) {
  3092.    if (isFortifiedCallFoldable(CI, 3))
  3093.      return emitStrNCat(CI->getArgOperand(0), CI->getArgOperand(1),
  3094.                         CI->getArgOperand(2), B, TLI);
  3095.  
  3096.    return nullptr;
  3097.  }
  3098.  
  3099.  Value *FortifiedLibCallSimplifier::optimizeStrLCpyChk(CallInst *CI,
  3100.                                                        IRBuilder<> &B) {
  3101.    if (isFortifiedCallFoldable(CI, 3))
  3102.      return emitStrLCpy(CI->getArgOperand(0), CI->getArgOperand(1),
  3103.                         CI->getArgOperand(2), B, TLI);
  3104.  
  3105.    return nullptr;
  3106.  }
  3107.  
  3108.  Value *FortifiedLibCallSimplifier::optimizeVSNPrintfChk(CallInst *CI,
  3109.                                                          IRBuilder<> &B) {
  3110.    if (isFortifiedCallFoldable(CI, 3, 1, None, 2))
  3111.      return emitVSNPrintf(CI->getArgOperand(0), CI->getArgOperand(1),
  3112.                           CI->getArgOperand(4), CI->getArgOperand(5), B, TLI);
  3113.  
  3114.    return nullptr;
  3115.  }
  3116.  
  3117.  Value *FortifiedLibCallSimplifier::optimizeVSPrintfChk(CallInst *CI,
  3118.                                                         IRBuilder<> &B) {
  3119.    if (isFortifiedCallFoldable(CI, 2, None, None, 1))
  3120.      return emitVSPrintf(CI->getArgOperand(0), CI->getArgOperand(3),
  3121.                          CI->getArgOperand(4), B, TLI);
  3122.  
  3123.    return nullptr;
  3124.  }
  3125.  
  3126.  Value *FortifiedLibCallSimplifier::optimizeCall(CallInst *CI) {
  3127.    // FIXME: We shouldn't be changing "nobuiltin" or TLI unavailable calls here.
  3128.    // Some clang users checked for _chk libcall availability using:
  3129.    //   __has_builtin(__builtin___memcpy_chk)
  3130.    // When compiling with -fno-builtin, this is always true.
  3131.    // When passing -ffreestanding/-mkernel, which both imply -fno-builtin, we
  3132.    // end up with fortified libcalls, which isn't acceptable in a freestanding
  3133.    // environment which only provides their non-fortified counterparts.
  3134.    //
  3135.    // Until we change clang and/or teach external users to check for availability
  3136.    // differently, disregard the "nobuiltin" attribute and TLI::has.
  3137.    //
  3138.    // PR23093.
  3139.  
  3140.    LibFunc Func;
  3141.    Function *Callee = CI->getCalledFunction();
  3142.  
  3143.    SmallVector<OperandBundleDef, 2> OpBundles;
  3144.    CI->getOperandBundlesAsDefs(OpBundles);
  3145.    IRBuilder<> Builder(CI, /*FPMathTag=*/nullptr, OpBundles);
  3146.    bool isCallingConvC = isCallingConvCCompatible(CI);
  3147.  
  3148.    // First, check that this is a known library functions and that the prototype
  3149.    // is correct.
  3150.    if (!TLI->getLibFunc(*Callee, Func))
  3151.      return nullptr;
  3152.  
  3153.    // We never change the calling convention.
  3154.    if (!ignoreCallingConv(Func) && !isCallingConvC)
  3155.      return nullptr;
  3156.  
  3157.    switch (Func) {
  3158.    case LibFunc_memcpy_chk:
  3159.      return optimizeMemCpyChk(CI, Builder);
  3160.    case LibFunc_memmove_chk:
  3161.      return optimizeMemMoveChk(CI, Builder);
  3162.    case LibFunc_memset_chk:
  3163.      return optimizeMemSetChk(CI, Builder);
  3164.    case LibFunc_stpcpy_chk:
  3165.    case LibFunc_strcpy_chk:
  3166.      return optimizeStrpCpyChk(CI, Builder, Func);
  3167.    case LibFunc_stpncpy_chk:
  3168.    case LibFunc_strncpy_chk:
  3169.      return optimizeStrpNCpyChk(CI, Builder, Func);
  3170.    case LibFunc_memccpy_chk:
  3171.      return optimizeMemCCpyChk(CI, Builder);
  3172.    case LibFunc_snprintf_chk:
  3173.      return optimizeSNPrintfChk(CI, Builder);
  3174.    case LibFunc_sprintf_chk:
  3175.      return optimizeSPrintfChk(CI, Builder);
  3176.    case LibFunc_strcat_chk:
  3177.      return optimizeStrCatChk(CI, Builder);
  3178.    case LibFunc_strlcat_chk:
  3179.      return optimizeStrLCat(CI, Builder);
  3180.    case LibFunc_strncat_chk:
  3181.      return optimizeStrNCatChk(CI, Builder);
  3182.    case LibFunc_strlcpy_chk:
  3183.      return optimizeStrLCpyChk(CI, Builder);
  3184.    case LibFunc_vsnprintf_chk:
  3185.      return optimizeVSNPrintfChk(CI, Builder);
  3186.    case LibFunc_vsprintf_chk:
  3187.      return optimizeVSPrintfChk(CI, Builder);
  3188.    default:
  3189.      break;
  3190.    }
  3191.    return nullptr;
  3192.  }
  3193.  
  3194.  FortifiedLibCallSimplifier::FortifiedLibCallSimplifier(
  3195.      const TargetLibraryInfo *TLI, bool OnlyLowerUnknownSize)
  3196.      : TLI(TLI), OnlyLowerUnknownSize(OnlyLowerUnknownSize) {}
  3197. Index: test/Transforms/InstCombine/pow_fp_int.ll
  3198. ===================================================================
  3199. --- test/Transforms/InstCombine/pow_fp_int.ll   (revision 367718)
  3200. +++ test/Transforms/InstCombine/pow_fp_int.ll   (working copy)
  3201. @@ -1,418 +1,442 @@
  3202.  ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
  3203.  ; RUN: opt -instcombine -S < %s | FileCheck %s
  3204.  
  3205.  ; PR42190
  3206.  
  3207.  define double @pow_sitofp_const_base_fast(i32 %x) {
  3208.  ; CHECK-LABEL: @pow_sitofp_const_base_fast(
  3209.  ; CHECK-NEXT:    [[TMP1:%.*]] = call afn float @llvm.powi.f32(float 7.000000e+00, i32 [[X:%.*]])
  3210.  ; CHECK-NEXT:    [[RES:%.*]] = fpext float [[TMP1]] to double
  3211.  ; CHECK-NEXT:    ret double [[RES]]
  3212.  ;
  3213.    %subfp = sitofp i32 %x to float
  3214.    %pow = tail call afn float @llvm.pow.f32(float 7.000000e+00, float %subfp)
  3215.    %res = fpext float %pow to double
  3216.    ret double %res
  3217.  }
  3218.  
  3219.  define double @pow_uitofp_const_base_fast(i31 %x) {
  3220.  ; CHECK-LABEL: @pow_uitofp_const_base_fast(
  3221.  ; CHECK-NEXT:    [[TMP1:%.*]] = zext i31 [[X:%.*]] to i32
  3222.  ; CHECK-NEXT:    [[TMP2:%.*]] = call afn float @llvm.powi.f32(float 7.000000e+00, i32 [[TMP1]])
  3223.  ; CHECK-NEXT:    [[RES:%.*]] = fpext float [[TMP2]] to double
  3224.  ; CHECK-NEXT:    ret double [[RES]]
  3225.  ;
  3226.    %subfp = uitofp i31 %x to float
  3227.    %pow = tail call afn float @llvm.pow.f32(float 7.000000e+00, float %subfp)
  3228.    %res = fpext float %pow to double
  3229.    ret double %res
  3230.  }
  3231.  
  define double @pow_sitofp_double_const_base_fast(i32 %x) {
  3232.  ; CHECK-LABEL: @pow_sitofp_double_const_base_fast(
  3233.  ; CHECK-NEXT:    [[TMP1:%.*]] = call afn double @llvm.powi.f64(double 7.000000e+00, i32 [[X:%.*]])
  3234.  ; CHECK-NEXT:    ret double [[TMP1]]
  3235.  ;
  3236.    %subfp = sitofp i32 %x to double
  3237.    %pow = tail call afn double @llvm.pow.f64(double 7.000000e+00, double %subfp)
  3238.    ret double %pow
  3239.  }
  3240.  
  define double @pow_uitofp_double_const_base_fast(i31 %x) {
  3241.  ; CHECK-LABEL: @pow_uitofp_double_const_base_fast(
  3242.  ; CHECK-NEXT:    [[TMP1:%.*]] = zext i31 [[X:%.*]] to i32
  3243.  ; CHECK-NEXT:    [[TMP2:%.*]] = call afn double @llvm.powi.f64(double 7.000000e+00, i32 [[TMP1]])
  3244.  ; CHECK-NEXT:    ret double [[TMP2]]
  3245.  ;
  3246.    %subfp = uitofp i31 %x to double
  3247.    %pow = tail call afn double @llvm.pow.f64(double 7.000000e+00, double %subfp)
  3248.    ret double %pow
  3249.  }
  3250.  
  define double @pow_sitofp_double_const_base_power_of_2_fast(i32 %x) {
  3251.  ; CHECK-LABEL: @pow_sitofp_double_const_base_power_of_2_fast(
  3252. -; CHECK-NEXT:    [[SUBFP:%.*]] = sitofp i32 [[X:%.*]] to float
  3253. -; CHECK-NEXT:    [[MUL:%.*]] = fmul afn float [[SUBFP]], 4.000000e+00
  3254. -; CHECK-NEXT:    [[EXP2:%.*]] = call afn float @llvm.exp2.f32(float [[MUL]])
  3255. -; CHECK-NEXT:    [[RES:%.*]] = fpext float [[EXP2]] to double
  3256. +; CHECK-NEXT:    [[TMP1:%.*]] = shl i32 [[X:%.*]], 2
  3257. +; CHECK-NEXT:    [[LDEXPF:%.*]] = call afn float @ldexpf(float 1.000000e+00, i32 [[TMP1]])
  3258. +; CHECK-NEXT:    [[RES:%.*]] = fpext float [[LDEXPF]] to double
  3259.  ; CHECK-NEXT:    ret double [[RES]]
  3260.  ;
  3261.    %subfp = sitofp i32 %x to float
  3262.    %pow = tail call afn float @llvm.pow.f32(float 16.000000e+00, float %subfp)
  3263.    %res = fpext float %pow to double
  3264.    ret double %res
  3265.  }
  3266.  
 +define double @pow_sitofp_double_const_base_power_of_2_fast_recip(i32 %x) {
  3267. +; CHECK-LABEL: @pow_sitofp_double_const_base_power_of_2_fast_recip(
  3268. +; CHECK-NEXT:    [[TMP1:%.*]] = mul i32 [[X:%.*]], -2
  3269. +; CHECK-NEXT:    [[LDEXPF:%.*]] = call afn float @ldexpf(float 1.000000e+00, i32 [[TMP1]])
  3270. +; CHECK-NEXT:    [[RES:%.*]] = fpext float [[LDEXPF]] to double
  3271. +; CHECK-NEXT:    ret double [[RES]]
  3272. +;
  3273. +  %subfp = sitofp i32 %x to float
  3274. +  %pow = tail call afn float @llvm.pow.f32(float 2.500000e-01, float %subfp)
  3275. +  %res = fpext float %pow to double
  3276. +  ret double %res
  3277. +}
  3278. +
 +define double @pow_uitofp_double_const_base_power_of_2_fast_recip(i31 %x) {
  3279. +; CHECK-LABEL: @pow_uitofp_double_const_base_power_of_2_fast_recip(
  3280. +; CHECK-NEXT:    [[TMP1:%.*]] = zext i31 [[X:%.*]] to i32
  3281. +; CHECK-NEXT:    [[TMP2:%.*]] = mul i32 [[TMP1]], -2
  3282. +; CHECK-NEXT:    [[LDEXPF:%.*]] = call afn float @ldexpf(float 1.000000e+00, i32 [[TMP2]])
  3283. +; CHECK-NEXT:    [[RES:%.*]] = fpext float [[LDEXPF]] to double
  3284. +; CHECK-NEXT:    ret double [[RES]]
  3285. +;
  3286. +  %subfp = uitofp i31 %x to float
  3287. +  %pow = tail call afn float @llvm.pow.f32(float 2.500000e-01, float %subfp)
  3288. +  %res = fpext float %pow to double
  3289. +  ret double %res
  3290. +}
  3291. +
  define double @pow_uitofp_const_base_power_of_2_fast(i31 %x) {
  3292.  ; CHECK-LABEL: @pow_uitofp_const_base_power_of_2_fast(
  3293. -; CHECK-NEXT:    [[SUBFP:%.*]] = uitofp i31 [[X:%.*]] to float
  3294. -; CHECK-NEXT:    [[MUL:%.*]] = fmul afn float [[SUBFP]], 4.000000e+00
  3295. -; CHECK-NEXT:    [[EXP2:%.*]] = call afn float @llvm.exp2.f32(float [[MUL]])
  3296. -; CHECK-NEXT:    [[RES:%.*]] = fpext float [[EXP2]] to double
  3297. +; CHECK-NEXT:    [[TMP1:%.*]] = zext i31 [[X:%.*]] to i32
  3298. +; CHECK-NEXT:    [[TMP2:%.*]] = shl i32 [[TMP1]], 2
  3299. +; CHECK-NEXT:    [[LDEXPF:%.*]] = call afn float @ldexpf(float 1.000000e+00, i32 [[TMP2]])
  3300. +; CHECK-NEXT:    [[RES:%.*]] = fpext float [[LDEXPF]] to double
  3301.  ; CHECK-NEXT:    ret double [[RES]]
  3302.  ;
  3303.    %subfp = uitofp i31 %x to float
  3304.    %pow = tail call afn float @llvm.pow.f32(float 16.000000e+00, float %subfp)
  3305.    %res = fpext float %pow to double
  3306.    ret double %res
  3307.  }
  3308.  
  3309.  define double @pow_sitofp_float_base_fast(float %base, i32 %x) {
  3310.  ; CHECK-LABEL: @pow_sitofp_float_base_fast(
  3311.  ; CHECK-NEXT:    [[TMP1:%.*]] = call afn float @llvm.powi.f32(float [[BASE:%.*]], i32 [[X:%.*]])
  3312.  ; CHECK-NEXT:    [[RES:%.*]] = fpext float [[TMP1]] to double
  3313.  ; CHECK-NEXT:    ret double [[RES]]
  3314.  ;
  3315.    %subfp = sitofp i32 %x to float
  3316.    %pow = tail call afn float @llvm.pow.f32(float %base, float %subfp)
  3317.    %res = fpext float %pow to double
  3318.    ret double %res
  3319.  }
  3320.  
  3321.  define double @pow_uitofp_float_base_fast(float %base, i31 %x) {
  3322.  ; CHECK-LABEL: @pow_uitofp_float_base_fast(
  3323.  ; CHECK-NEXT:    [[TMP1:%.*]] = zext i31 [[X:%.*]] to i32
  3324.  ; CHECK-NEXT:    [[TMP2:%.*]] = call afn float @llvm.powi.f32(float [[BASE:%.*]], i32 [[TMP1]])
  3325.  ; CHECK-NEXT:    [[RES:%.*]] = fpext float [[TMP2]] to double
  3326.  ; CHECK-NEXT:    ret double [[RES]]
  3327.  ;
  3328.    %subfp = uitofp i31 %x to float
  3329.    %pow = tail call afn float @llvm.pow.f32(float %base, float %subfp)
  3330.    %res = fpext float %pow to double
  3331.    ret double %res
  3332.  }
  3333.  
  3334.  define double @pow_sitofp_double_base_fast(double %base, i32 %x) {
  3335.  ; CHECK-LABEL: @pow_sitofp_double_base_fast(
  3336.  ; CHECK-NEXT:    [[TMP1:%.*]] = call afn double @llvm.powi.f64(double [[BASE:%.*]], i32 [[X:%.*]])
  3337.  ; CHECK-NEXT:    ret double [[TMP1]]
  3338.  ;
  3339.    %subfp = sitofp i32 %x to double
  3340.    %res = tail call afn double @llvm.pow.f64(double %base, double %subfp)
  3341.    ret double %res
  3342.  }
  3343.  
  3344.  define double @pow_uitofp_double_base_fast(double %base, i31 %x) {
  3345.  ; CHECK-LABEL: @pow_uitofp_double_base_fast(
  3346.  ; CHECK-NEXT:    [[TMP1:%.*]] = zext i31 [[X:%.*]] to i32
  3347.  ; CHECK-NEXT:    [[TMP2:%.*]] = call afn double @llvm.powi.f64(double [[BASE:%.*]], i32 [[TMP1]])
  3348.  ; CHECK-NEXT:    ret double [[TMP2]]
  3349.  ;
  3350.    %subfp = uitofp i31 %x to double
  3351.    %res = tail call afn double @llvm.pow.f64(double %base, double %subfp)
  3352.    ret double %res
  3353.  }
  3354.  
  3355.  define double @pow_sitofp_const_base_fast_i8(i8 %x) {
  3356.  ; CHECK-LABEL: @pow_sitofp_const_base_fast_i8(
  3357.  ; CHECK-NEXT:    [[TMP1:%.*]] = sext i8 [[X:%.*]] to i32
  3358.  ; CHECK-NEXT:    [[TMP2:%.*]] = call afn float @llvm.powi.f32(float 7.000000e+00, i32 [[TMP1]])
  3359.  ; CHECK-NEXT:    [[RES:%.*]] = fpext float [[TMP2]] to double
  3360.  ; CHECK-NEXT:    ret double [[RES]]
  3361.  ;
  3362.    %subfp = sitofp i8 %x to float
  3363.    %pow = tail call afn float @llvm.pow.f32(float 7.000000e+00, float %subfp)
  3364.    %res = fpext float %pow to double
  3365.    ret double %res
  3366.  }
  3367.  
  3368.  define double @pow_sitofp_const_base_fast_i16(i16 %x) {
  3369.  ; CHECK-LABEL: @pow_sitofp_const_base_fast_i16(
  3370.  ; CHECK-NEXT:    [[TMP1:%.*]] = sext i16 [[X:%.*]] to i32
  3371.  ; CHECK-NEXT:    [[TMP2:%.*]] = call afn float @llvm.powi.f32(float 7.000000e+00, i32 [[TMP1]])
  3372.  ; CHECK-NEXT:    [[RES:%.*]] = fpext float [[TMP2]] to double
  3373.  ; CHECK-NEXT:    ret double [[RES]]
  3374.  ;
  3375.    %subfp = sitofp i16 %x to float
  3376.    %pow = tail call afn float @llvm.pow.f32(float 7.000000e+00, float %subfp)
  3377.    %res = fpext float %pow to double
  3378.    ret double %res
  3379.  }
  3380.  
  3381.  
  3382.  define double @pow_uitofp_const_base_fast_i8(i8 %x) {
  3383.  ; CHECK-LABEL: @pow_uitofp_const_base_fast_i8(
  3384.  ; CHECK-NEXT:    [[TMP1:%.*]] = zext i8 [[X:%.*]] to i32
  3385.  ; CHECK-NEXT:    [[TMP2:%.*]] = call afn float @llvm.powi.f32(float 7.000000e+00, i32 [[TMP1]])
  3386.  ; CHECK-NEXT:    [[RES:%.*]] = fpext float [[TMP2]] to double
  3387.  ; CHECK-NEXT:    ret double [[RES]]
  3388.  ;
  3389.    %subfp = uitofp i8 %x to float
  3390.    %pow = tail call afn float @llvm.pow.f32(float 7.000000e+00, float %subfp)
  3391.    %res = fpext float %pow to double
  3392.    ret double %res
  3393.  }
  3394.  
  3395.  define double @pow_uitofp_const_base_fast_i16(i16 %x) {
  3396.  ; CHECK-LABEL: @pow_uitofp_const_base_fast_i16(
  3397.  ; CHECK-NEXT:    [[TMP1:%.*]] = zext i16 [[X:%.*]] to i32
  3398.  ; CHECK-NEXT:    [[TMP2:%.*]] = call afn float @llvm.powi.f32(float 7.000000e+00, i32 [[TMP1]])
  3399.  ; CHECK-NEXT:    [[RES:%.*]] = fpext float [[TMP2]] to double
  3400.  ; CHECK-NEXT:    ret double [[RES]]
  3401.  ;
  3402.    %subfp = uitofp i16 %x to float
  3403.    %pow = tail call afn float @llvm.pow.f32(float 7.000000e+00, float %subfp)
  3404.    %res = fpext float %pow to double
  3405.    ret double %res
  3406.  }
  3407.  
  3408.  define double @powf_exp_const_int_fast(double %base) {
  3409.  ; CHECK-LABEL: @powf_exp_const_int_fast(
  3410.  ; CHECK-NEXT:    [[TMP1:%.*]] = call fast double @llvm.powi.f64(double [[BASE:%.*]], i32 40)
  3411.  ; CHECK-NEXT:    ret double [[TMP1]]
  3412.  ;
  3413.    %res = tail call fast double @llvm.pow.f64(double %base, double 4.000000e+01)
  3414.    ret double %res
  3415.  }
  3416.  
  3417.  define double @powf_exp_const2_int_fast(double %base) {
  3418.  ; CHECK-LABEL: @powf_exp_const2_int_fast(
  3419.  ; CHECK-NEXT:    [[TMP1:%.*]] = call fast double @llvm.powi.f64(double [[BASE:%.*]], i32 -40)
  3420.  ; CHECK-NEXT:    ret double [[TMP1]]
  3421.  ;
  3422.    %res = tail call fast double @llvm.pow.f64(double %base, double -4.000000e+01)
  3423.    ret double %res
  3424.  }
  3425.  
  3426.  ; Negative tests
  3427.  
  3428.  define double @pow_uitofp_const_base_fast_i32(i32 %x) {
  3429.  ; CHECK-LABEL: @pow_uitofp_const_base_fast_i32(
  3430.  ; CHECK-NEXT:    [[SUBFP:%.*]] = uitofp i32 [[X:%.*]] to float
  3431. -; CHECK-NEXT:    [[MUL:%.*]] = fmul fast float [[SUBFP]], 0x4006757{{.*}}
  3432. +; CHECK-NEXT:    [[MUL:%.*]] = fmul fast float [[SUBFP]], 0x4006757680000000
  3433.  ; CHECK-NEXT:    [[EXP2:%.*]] = call fast float @llvm.exp2.f32(float [[MUL]])
  3434.  ; CHECK-NEXT:    [[RES:%.*]] = fpext float [[EXP2]] to double
  3435.  ; CHECK-NEXT:    ret double [[RES]]
  3436.  ;
  3437.    %subfp = uitofp i32 %x to float
  3438.    %pow = tail call fast float @llvm.pow.f32(float 7.000000e+00, float %subfp)
  3439.    %res = fpext float %pow to double
  3440.    ret double %res
  3441.  }
  3442.  
  define double @pow_uitofp_const_base_power_of_2_fast_i32(i32 %x) {
  3443.  ; CHECK-LABEL: @pow_uitofp_const_base_power_of_2_fast_i32(
  3444.  ; CHECK-NEXT:    [[SUBFP:%.*]] = uitofp i32 [[X:%.*]] to float
  3445.  ; CHECK-NEXT:    [[MUL:%.*]] = fmul fast float [[SUBFP]], 4.000000e+00
  3446.  ; CHECK-NEXT:    [[EXP2:%.*]] = call fast float @llvm.exp2.f32(float [[MUL]])
  3447.  ; CHECK-NEXT:    [[RES:%.*]] = fpext float [[EXP2]] to double
  3448.  ; CHECK-NEXT:    ret double [[RES]]
  3449.  ;
  3450.    %subfp = uitofp i32 %x to float
  3451.    %pow = tail call fast float @llvm.pow.f32(float 16.000000e+00, float %subfp)
  3452.    %res = fpext float %pow to double
  3453.    ret double %res
  3454.  }
  3455.  
  3456.  define double @pow_uitofp_float_base_fast_i32(float %base, i32 %x) {
  3457.  ; CHECK-LABEL: @pow_uitofp_float_base_fast_i32(
  3458.  ; CHECK-NEXT:    [[SUBFP:%.*]] = uitofp i32 [[X:%.*]] to float
  3459.  ; CHECK-NEXT:    [[POW:%.*]] = tail call fast float @llvm.pow.f32(float [[BASE:%.*]], float [[SUBFP]])
  3460.  ; CHECK-NEXT:    [[RES:%.*]] = fpext float [[POW]] to double
  3461.  ; CHECK-NEXT:    ret double [[RES]]
  3462.  ;
  3463.    %subfp = uitofp i32 %x to float
  3464.    %pow = tail call fast float @llvm.pow.f32(float %base, float %subfp)
  3465.    %res = fpext float %pow to double
  3466.    ret double %res
  3467.  }
  3468.  
  3469.  define double @pow_uitofp_double_base_fast_i32(double %base, i32 %x) {
  3470.  ; CHECK-LABEL: @pow_uitofp_double_base_fast_i32(
  3471.  ; CHECK-NEXT:    [[SUBFP:%.*]] = uitofp i32 [[X:%.*]] to double
  3472.  ; CHECK-NEXT:    [[RES:%.*]] = tail call fast double @llvm.pow.f64(double [[BASE:%.*]], double [[SUBFP]])
  3473.  ; CHECK-NEXT:    ret double [[RES]]
  3474.  ;
  3475.    %subfp = uitofp i32 %x to double
  3476.    %res = tail call fast double @llvm.pow.f64(double %base, double %subfp)
  3477.    ret double %res
  3478.  }
  3479.  
  3480.  define double @pow_sitofp_const_base_fast_i64(i64 %x) {
  3481.  ; CHECK-LABEL: @pow_sitofp_const_base_fast_i64(
  3482.  ; CHECK-NEXT:    [[SUBFP:%.*]] = sitofp i64 [[X:%.*]] to float
  3483. -; CHECK-NEXT:    [[MUL:%.*]] = fmul fast float [[SUBFP]], 0x400675{{.*}}
  3484. +; CHECK-NEXT:    [[MUL:%.*]] = fmul fast float [[SUBFP]], 0x4006757680000000
  3485.  ; CHECK-NEXT:    [[EXP2:%.*]] = call fast float @llvm.exp2.f32(float [[MUL]])
  3486.  ; CHECK-NEXT:    [[RES:%.*]] = fpext float [[EXP2]] to double
  3487.  ; CHECK-NEXT:    ret double [[RES]]
  3488.  ;
  3489.    %subfp = sitofp i64 %x to float
  3490.    %pow = tail call fast float @llvm.pow.f32(float 7.000000e+00, float %subfp)
  3491.    %res = fpext float %pow to double
  3492.    ret double %res
  3493.  }
  3494.  
  3495.  define double @pow_uitofp_const_base_fast_i64(i64 %x) {
  3496.  ; CHECK-LABEL: @pow_uitofp_const_base_fast_i64(
  3497.  ; CHECK-NEXT:    [[SUBFP:%.*]] = uitofp i64 [[X:%.*]] to float
  3498. -; CHECK-NEXT:    [[MUL:%.*]] = fmul fast float [[SUBFP]], 0x400675{{.*}}
  3499. +; CHECK-NEXT:    [[MUL:%.*]] = fmul fast float [[SUBFP]], 0x4006757680000000
  3500.  ; CHECK-NEXT:    [[EXP2:%.*]] = call fast float @llvm.exp2.f32(float [[MUL]])
  3501.  ; CHECK-NEXT:    [[RES:%.*]] = fpext float [[EXP2]] to double
  3502.  ; CHECK-NEXT:    ret double [[RES]]
  3503.  ;
  3504.    %subfp = uitofp i64 %x to float
  3505.    %pow = tail call fast float @llvm.pow.f32(float 7.000000e+00, float %subfp)
  3506.    %res = fpext float %pow to double
  3507.    ret double %res
  3508.  }
  3509.  
  3510.  define double @pow_sitofp_const_base_no_fast(i32 %x) {
  3511.  ; CHECK-LABEL: @pow_sitofp_const_base_no_fast(
  3512.  ; CHECK-NEXT:    [[SUBFP:%.*]] = sitofp i32 [[X:%.*]] to float
  3513.  ; CHECK-NEXT:    [[POW:%.*]] = tail call float @llvm.pow.f32(float 7.000000e+00, float [[SUBFP]])
  3514.  ; CHECK-NEXT:    [[RES:%.*]] = fpext float [[POW]] to double
  3515.  ; CHECK-NEXT:    ret double [[RES]]
  3516.  ;
  3517.    %subfp = sitofp i32 %x to float
  3518.    %pow = tail call float @llvm.pow.f32(float 7.000000e+00, float %subfp)
  3519.    %res = fpext float %pow to double
  3520.    ret double %res
  3521.  }
  3522.  
  3523.  define double @pow_uitofp_const_base_no_fast(i32 %x) {
  3524.  ; CHECK-LABEL: @pow_uitofp_const_base_no_fast(
  3525.  ; CHECK-NEXT:    [[SUBFP:%.*]] = uitofp i32 [[X:%.*]] to float
  3526.  ; CHECK-NEXT:    [[POW:%.*]] = tail call float @llvm.pow.f32(float 7.000000e+00, float [[SUBFP]])
  3527.  ; CHECK-NEXT:    [[RES:%.*]] = fpext float [[POW]] to double
  3528.  ; CHECK-NEXT:    ret double [[RES]]
  3529.  ;
  3530.    %subfp = uitofp i32 %x to float
  3531.    %pow = tail call float @llvm.pow.f32(float 7.000000e+00, float %subfp)
  3532.    %res = fpext float %pow to double
  3533.    ret double %res
  3534.  }
  3535.  
  define double @pow_sitofp_const_base_power_of_2_no_fast(i32 %x) {
  3536.  ; CHECK-LABEL: @pow_sitofp_const_base_power_of_2_no_fast(
  3537. -; CHECK-NEXT:    [[SUBFP:%.*]] = sitofp i32 [[X:%.*]] to float
  3538. -; CHECK-NEXT:    [[MUL:%.*]] = fmul float [[SUBFP]], 4.000000e+00
  3539. -; CHECK-NEXT:    [[EXP2:%.*]] = call float @llvm.exp2.f32(float [[MUL]])
  3540. -; CHECK-NEXT:    [[RES:%.*]] = fpext float [[EXP2]] to double
  3541. +; CHECK-NEXT:    [[TMP1:%.*]] = shl i32 [[X:%.*]], 2
  3542. +; CHECK-NEXT:    [[LDEXPF:%.*]] = call float @ldexpf(float 1.000000e+00, i32 [[TMP1]])
  3543. +; CHECK-NEXT:    [[RES:%.*]] = fpext float [[LDEXPF]] to double
  3544.  ; CHECK-NEXT:    ret double [[RES]]
  3545.  ;
  3546.    %subfp = sitofp i32 %x to float
  3547.    %pow = tail call float @llvm.pow.f32(float 16.000000e+00, float %subfp)
  3548.    %res = fpext float %pow to double
  3549.    ret double %res
  3550.  }
  3551.  
  3552.  ; CHECK-LABEL: @pow_uitofp_const_base_power_of_2_no_fast(
  3553.  ; CHECK-NEXT:    [[SUBFP:%.*]] = uitofp i32 [[X:%.*]] to float
  3554. -; CHECK-NEXT:    [[MUL:%.*]] = fmul float [[SUBFP]], 4.000000e+00
  3555. -; CHECK-NEXT:    [[EXP2:%.*]] = call float @llvm.exp2.f32(float [[MUL]])
  3556. -; CHECK-NEXT:    [[RES:%.*]] = fpext float [[EXP2]] to double
  3557. +; CHECK-NEXT:    [[POW:%.*]] = tail call float @llvm.pow.f32(float 1.600000e+01, float [[SUBFP]])
  3558. +; CHECK-NEXT:    [[RES:%.*]] = fpext float [[POW]] to double
  3559.  ; CHECK-NEXT:    ret double [[RES]]
  3560.  ;
  3561.    %subfp = uitofp i32 %x to float
  3562.    %pow = tail call float @llvm.pow.f32(float 16.000000e+00, float %subfp)
  3563.    %res = fpext float %pow to double
  3564.    ret double %res
  3565.  }
  3566.  
  3567.  define double @pow_sitofp_float_base_no_fast(float %base, i32 %x) {
  3568.  ; CHECK-LABEL: @pow_sitofp_float_base_no_fast(
  3569.  ; CHECK-NEXT:    [[SUBFP:%.*]] = sitofp i32 [[X:%.*]] to float
  3570.  ; CHECK-NEXT:    [[POW:%.*]] = tail call float @llvm.pow.f32(float [[BASE:%.*]], float [[SUBFP]])
  3571.  ; CHECK-NEXT:    [[RES:%.*]] = fpext float [[POW]] to double
  3572.  ; CHECK-NEXT:    ret double [[RES]]
  3573.  ;
  3574.    %subfp = sitofp i32 %x to float
  3575.    %pow = tail call float @llvm.pow.f32(float %base, float %subfp)
  3576.    %res = fpext float %pow to double
  3577.    ret double %res
  3578.  }
  3579.  
  3580.  define double @pow_uitofp_float_base_no_fast(float %base, i32 %x) {
  3581.  ; CHECK-LABEL: @pow_uitofp_float_base_no_fast(
  3582.  ; CHECK-NEXT:    [[SUBFP:%.*]] = uitofp i32 [[X:%.*]] to float
  3583.  ; CHECK-NEXT:    [[POW:%.*]] = tail call float @llvm.pow.f32(float [[BASE:%.*]], float [[SUBFP]])
  3584.  ; CHECK-NEXT:    [[RES:%.*]] = fpext float [[POW]] to double
  3585.  ; CHECK-NEXT:    ret double [[RES]]
  3586.  ;
  3587.    %subfp = uitofp i32 %x to float
  3588.    %pow = tail call float @llvm.pow.f32(float %base, float %subfp)
  3589.    %res = fpext float %pow to double
  3590.    ret double %res
  3591.  }
  3592.  
  3593.  define double @pow_sitofp_double_base_no_fast(double %base, i32 %x) {
  3594.  ; CHECK-LABEL: @pow_sitofp_double_base_no_fast(
  3595.  ; CHECK-NEXT:    [[SUBFP:%.*]] = sitofp i32 [[X:%.*]] to double
  3596.  ; CHECK-NEXT:    [[POW:%.*]] = tail call double @llvm.pow.f64(double [[BASE:%.*]], double [[SUBFP]])
  3597.  ; CHECK-NEXT:    ret double [[POW]]
  3598.  ;
  3599.    %subfp = sitofp i32 %x to double
  3600.    %pow = tail call double @llvm.pow.f64(double %base, double %subfp)
  3601.    ret double %pow
  3602.  }
  3603.  
  3604.  define double @pow_uitofp_double_base_no_fast(double %base, i32 %x) {
  3605.  ; CHECK-LABEL: @pow_uitofp_double_base_no_fast(
  3606.  ; CHECK-NEXT:    [[SUBFP:%.*]] = uitofp i32 [[X:%.*]] to double
  3607.  ; CHECK-NEXT:    [[POW:%.*]] = tail call double @llvm.pow.f64(double [[BASE:%.*]], double [[SUBFP]])
  3608.  ; CHECK-NEXT:    ret double [[POW]]
  3609.  ;
  3610.    %subfp = uitofp i32 %x to double
  3611.    %pow = tail call double @llvm.pow.f64(double %base, double %subfp)
  3612.    ret double %pow
  3613.  }
  3614.  
  3615.  define double @powf_exp_const_int_no_fast(double %base) {
  3616.  ; CHECK-LABEL: @powf_exp_const_int_no_fast(
  3617.  ; CHECK-NEXT:    [[RES:%.*]] = tail call double @llvm.pow.f64(double [[BASE:%.*]], double 4.000000e+01)
  3618.  ; CHECK-NEXT:    ret double [[RES]]
  3619.  ;
  3620.    %res = tail call double @llvm.pow.f64(double %base, double 4.000000e+01)
  3621.    ret double %res
  3622.  }
  3623.  
  3624.  define double @powf_exp_const_not_int_fast(double %base) {
  3625.  ; CHECK-LABEL: @powf_exp_const_not_int_fast(
  3626.  ; CHECK-NEXT:    [[RES:%.*]] = tail call fast double @llvm.pow.f64(double [[BASE:%.*]], double 3.750000e+01)
  3627.  ; CHECK-NEXT:    ret double [[RES]]
  3628.  ;
  3629.    %res = tail call fast double @llvm.pow.f64(double %base, double 3.750000e+01)
  3630.    ret double %res
  3631.  }
  3632.  
  3633.  define double @powf_exp_const_not_int_no_fast(double %base) {
  3634.  ; CHECK-LABEL: @powf_exp_const_not_int_no_fast(
  3635.  ; CHECK-NEXT:    [[RES:%.*]] = tail call double @llvm.pow.f64(double [[BASE:%.*]], double 3.750000e+01)
  3636.  ; CHECK-NEXT:    ret double [[RES]]
  3637.  ;
  3638.    %res = tail call double @llvm.pow.f64(double %base, double 3.750000e+01)
  3639.    ret double %res
  3640.  }
  3641.  
  3642.  define double @powf_exp_const2_int_no_fast(double %base) {
  3643.  ; CHECK-LABEL: @powf_exp_const2_int_no_fast(
  3644.  ; CHECK-NEXT:    [[RES:%.*]] = tail call double @llvm.pow.f64(double [[BASE:%.*]], double -4.000000e+01)
  3645.  ; CHECK-NEXT:    ret double [[RES]]
  3646.  ;
  3647.    %res = tail call double @llvm.pow.f64(double %base, double -4.000000e+01)
  3648.    ret double %res
  3649.  }
  3650.  
  3651.  declare float @llvm.pow.f32(float, float)
  3652.  declare double @llvm.pow.f64(double, double)
RAW Paste Data
We use cookies for various purposes including analytics. By continuing to use Pastebin, you agree to our use of cookies as described in the Cookies Policy. OK, I Understand
 
Top