nrdmttt

config.cmake

May 24th, 2022
47
0
Never
Not a member of Pastebin yet? Sign Up, it unlocks many cool features!
CMake 11.84 KB | None | 0 0
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

#--------------------------------------------------------------------
#  Template custom cmake configuration for compiling
#
#  This file is used to override the build options in build.
#  If you want to change the configuration, please use the following
#  steps. Assume you are in the root directory. First copy this
#  file so that any local changes will be ignored by git
#
#  $ mkdir build
#  $ cp cmake/config.cmake build
#
#  Next modify the corresponding entries, and then compile by
#
#  $ cd build
#  $ cmake ..
#
#  Then build in parallel with 8 threads
#
#  $ make -j8
#--------------------------------------------------------------------

  39. #---------------------------------------------
  40. # Backend runtimes.
  41. #---------------------------------------------
  42.  
  43. # Whether enable CUDA during compile,
  44. #
  45. # Possible values:
  46. # - ON: enable CUDA with cmake's auto search
  47. # - OFF: disable CUDA
  48. # - /path/to/cuda: use specific path to cuda toolkit
  49. set(USE_CUDA ON)
  50.  
  51. # Whether enable ROCM runtime
  52. #
  53. # Possible values:
  54. # - ON: enable ROCM with cmake's auto search
  55. # - OFF: disable ROCM
  56. # - /path/to/rocm: use specific path to rocm
  57. set(USE_ROCM OFF)
  58.  
  59. # Whether enable SDAccel runtime
  60. set(USE_SDACCEL OFF)
  61.  
  62. # Whether enable Intel FPGA SDK for OpenCL (AOCL) runtime
  63. set(USE_AOCL OFF)
  64.  
  65. # Whether enable OpenCL runtime
  66. #
  67. # Possible values:
  68. # - ON: enable OpenCL with cmake's auto search
  69. # - OFF: disable OpenCL
  70. # - /path/to/opencl-sdk: use specific path to opencl-sdk
  71. set(USE_OPENCL OFF)
  72.  
  73. # Whether enable Metal runtime
  74. set(USE_METAL OFF)
  75.  
  76. # Whether enable Vulkan runtime
  77. #
  78. # Possible values:
  79. # - ON: enable Vulkan with cmake's auto search
  80. # - OFF: disable vulkan
  81. # - /path/to/vulkan-sdk: use specific path to vulkan-sdk
  82. set(USE_VULKAN OFF)
  83.  
  84. # Whether to use spirv-tools.and SPIRV-Headers from Khronos github or gitlab.
  85. #
  86. # Possible values:
  87. # - OFF: not to use
  88. # - /path/to/install: path to your khronis spirv-tools and SPIRV-Headers installation directory
  89. #
  90. set(USE_KHRONOS_SPIRV OFF)
  91.  
  92. # whether enable SPIRV_KHR_DOT_PRODUCT
  93. set(USE_SPIRV_KHR_INTEGER_DOT_PRODUCT OFF)
  94.  
  95. # Whether enable OpenGL runtime
  96. set(USE_OPENGL OFF)
  97.  
  98. # Whether enable MicroTVM runtime
  99. set(USE_MICRO OFF)
  100.  
  101. # Whether enable RPC runtime
  102. set(USE_RPC ON)
  103.  
  104. # Whether to build the C++ RPC server binary
  105. set(USE_CPP_RPC OFF)
  106.  
  107. # Whether to build the iOS RPC server application
  108. set(USE_IOS_RPC OFF)
  109.  
  110. # Whether embed stackvm into the runtime
  111. set(USE_STACKVM_RUNTIME OFF)
  112.  
  113. # Whether enable tiny embedded graph executor.
  114. set(USE_GRAPH_EXECUTOR ON)
  115.  
  116. # Whether enable tiny graph executor with CUDA Graph
  117. set(USE_GRAPH_EXECUTOR_CUDA_GRAPH OFF)
  118.  
  119. # Whether enable pipeline executor.
  120. set(USE_PIPELINE_EXECUTOR OFF)
  121.  
  122. # Whether to enable the profiler for the graph executor and vm
  123. set(USE_PROFILER ON)
  124.  
  125. # Whether enable microTVM standalone runtime
  126. set(USE_MICRO_STANDALONE_RUNTIME OFF)
  127.  
  128. # Whether build with LLVM support
  129. # Requires LLVM version >= 4.0
  130. #
  131. # Possible values:
  132. # - ON: enable llvm with cmake's find search
  133. # - OFF: disable llvm, note this will disable CPU codegen
  134. #        which is needed for most cases
  135. # - /path/to/llvm-config: enable specific LLVM when multiple llvm-dev is available.
  136. set(USE_LLVM ON)
  137.  
  138. #---------------------------------------------
  139. # Contrib libraries
  140. #---------------------------------------------
  141. # Whether to build with BYODT software emulated posit custom datatype
  142. #
  143. # Possible values:
  144. # - ON: enable BYODT posit, requires setting UNIVERSAL_PATH
  145. # - OFF: disable BYODT posit
  146. #
  147. # set(UNIVERSAL_PATH /path/to/stillwater-universal) for ON
  148. set(USE_BYODT_POSIT OFF)
  149.  
  150. # Whether use BLAS, choices: openblas, atlas, apple
  151. set(USE_BLAS none)
  152.  
  153. # Whether to use MKL
  154. # Possible values:
  155. # - ON: Enable MKL
  156. # - /path/to/mkl: mkl root path
  157. # - OFF: Disable MKL
  158. # set(USE_MKL /opt/intel/mkl) for UNIX
  159. # set(USE_MKL ../IntelSWTools/compilers_and_libraries_2018/windows/mkl) for WIN32
  160. # set(USE_MKL <path to venv or site-packages directory>) if using `pip install mkl`
  161. set(USE_MKL OFF)
  162.  
  163. # Whether use MKLDNN library, choices: ON, OFF, path to mkldnn library
  164. set(USE_MKLDNN OFF)
  165.  
  166. # Whether use OpenMP thread pool, choices: gnu, intel
  167. # Note: "gnu" uses gomp library, "intel" uses iomp5 library
  168. set(USE_OPENMP none)
  169.  
  170. # Whether use contrib.random in runtime
  171. set(USE_RANDOM ON)
  172.  
  173. # Whether use NNPack
  174. set(USE_NNPACK ON)
  175.  
  176. # Possible values:
  177. # - ON: enable tflite with cmake's find search
  178. # - OFF: disable tflite
  179. # - /path/to/libtensorflow-lite.a: use specific path to tensorflow lite library
  180. set(USE_TFLITE ~/TVM/tflite_build/)
  181.  
  182. # /path/to/tensorflow: tensorflow root path when use tflite library
  183. set(USE_TENSORFLOW_PATH ~/TVM/tensorflow/)
  184.  
  185. # Required for full builds with TFLite. Not needed for runtime with TFLite.
  186. # /path/to/flatbuffers: flatbuffers root path when using tflite library
  187. set(USE_FLATBUFFERS_PATH ~/TVM/tflite_build/flatbuffers)
  188.  
  189. # Possible values:
  190. # - OFF: disable tflite support for edgetpu
  191. # - /path/to/edgetpu: use specific path to edgetpu library
  192. set(USE_EDGETPU OFF)
  193.  
  194. # Possible values:
  195. # - ON: enable cuDNN with cmake's auto search in CUDA directory
  196. # - OFF: disable cuDNN
  197. # - /path/to/cudnn: use specific path to cuDNN path
  198. set(USE_CUDNN on)
  199.  
  200. # Whether use cuBLAS
  201. set(USE_CUBLAS OFF)
  202.  
  203. # Whether use MIOpen
  204. set(USE_MIOPEN OFF)
  205.  
  206. # Whether use MPS
  207. set(USE_MPS OFF)
  208.  
  209. # Whether use rocBlas
  210. set(USE_ROCBLAS OFF)
  211.  
  212. # Whether use contrib sort
  213. set(USE_SORT ON)
  214.  
  215. # Whether use MKL-DNN (DNNL) codegen
  216. set(USE_DNNL_CODEGEN OFF)
  217.  
  218. # Whether to use Arm Compute Library (ACL) codegen
  219. # We provide 2 separate flags since we cannot build the ACL runtime on x86.
  220. # This is useful for cases where you want to cross-compile a relay graph
  221. # on x86 then run on AArch.
  222. #
  223. # An example of how to use this can be found here: docs/deploy/arm_compute_lib.rst.
  224. #
  225. # USE_ARM_COMPUTE_LIB - Support for compiling a relay graph offloading supported
  226. #                       operators to Arm Compute Library. OFF/ON
  227. # USE_ARM_COMPUTE_LIB_GRAPH_EXECUTOR - Run Arm Compute Library annotated functions via the ACL
  228. #                                     runtime. OFF/ON/"path/to/ACL"
  229. set(USE_ARM_COMPUTE_LIB OFF)
  230. set(USE_ARM_COMPUTE_LIB_GRAPH_EXECUTOR OFF)
  231.  
  232. # Whether to build with Arm Ethos-N support
  233. # Possible values:
  234. # - OFF: disable Arm Ethos-N support
  235. # - path/to/arm-ethos-N-stack: use a specific version of the
  236. #   Ethos-N driver stack
  237. set(USE_ETHOSN OFF)
  238. # If USE_ETHOSN is enabled, use ETHOSN_HW (ON) if Ethos-N hardware is available on this machine
  239. # otherwise use ETHOSN_HW (OFF) to use the software test infrastructure
  240. set(USE_ETHOSN_HW OFF)
  241.  
  242. # Whether to build with Arm(R) Ethos(TM)-U NPU codegen support
  243. set(USE_ETHOSU OFF)
  244.  
  245. # Whether to build with CMSIS-NN external library support.
  246. # See https://github.com/ARM-software/CMSIS_5
  247. set(USE_CMSISNN ON)
  248.  
  249. # Whether to build with TensorRT codegen or runtime
  250. # Examples are available here: docs/deploy/tensorrt.rst.
  251. #
  252. # USE_TENSORRT_CODEGEN - Support for compiling a relay graph where supported operators are
  253. #                        offloaded to TensorRT. OFF/ON
  254. # USE_TENSORRT_RUNTIME - Support for running TensorRT compiled modules, requires presense of
  255. #                        TensorRT library. OFF/ON/"path/to/TensorRT"
  256. set(USE_TENSORRT_CODEGEN OFF)
  257. set(USE_TENSORRT_RUNTIME OFF)
  258.  
  259. # Whether use VITIS-AI codegen
  260. set(USE_VITIS_AI OFF)
  261.  
  262. # Build Verilator codegen and runtime
  263. set(USE_VERILATOR OFF)
  264.  
  265. # Build ANTLR parser for Relay text format
  266. # Possible values:
  267. # - ON: enable ANTLR by searching default locations (cmake find_program for antlr4 and /usr/local for jar)
  268. # - OFF: disable ANTLR
  269. # - /path/to/antlr-*-complete.jar: path to specific ANTLR jar file
  270. set(USE_ANTLR OFF)
  271.  
  272. # Whether use Relay debug mode
  273. set(USE_RELAY_DEBUG OFF)
  274.  
  275. # Whether to build fast VTA simulator driver
  276. set(USE_VTA_FSIM OFF)
  277.  
  278. # Whether to build cycle-accurate VTA simulator driver
  279. set(USE_VTA_TSIM OFF)
  280.  
  281. # Whether to build VTA FPGA driver (device side only)
  282. set(USE_VTA_FPGA OFF)
  283.  
  284. # Whether use Thrust
  285. set(USE_THRUST OFF)
  286.  
  287. # Whether to build the TensorFlow TVMDSOOp module
  288. set(USE_TF_TVMDSOOP OFF)
  289.  
  290. # Whether to build the PyTorch custom class module
  291. set(USE_PT_TVMDSOOP OFF)
  292.  
  293. # Whether to use STL's std::unordered_map or TVM's POD compatible Map
  294. set(USE_FALLBACK_STL_MAP OFF)
  295.  
  296. # Whether to enable Hexagon support
  297. set(USE_HEXAGON OFF)
  298. set(USE_HEXAGON_SDK /path/to/sdk)
  299.  
  300. # Whether to build the minimal support android rpc server for Hexagon
  301. set(USE_HEXAGON_RPC OFF)
  302.  
  303. # Hexagon architecture to target when compiling TVM itself (not the target for
  304. # compiling _by_ TVM). This applies to components like the TVM runtime, but is
  305. # also used to select correct include/library paths from the Hexagon SDK when
  306. # building runtime for Android.
  307. # Valid values are v65, v66, v68, v69.
  308. set(USE_HEXAGON_ARCH "v66")
  309.  
  310. # Whether to use ONNX codegen
  311. set(USE_TARGET_ONNX OFF)
  312.  
  313. # Whether enable BNNS runtime
  314. set(USE_BNNS OFF)
  315.  
  316. # Whether to use libbacktrace
  317. # Libbacktrace provides line and column information on stack traces from errors.
  318. # It is only supported on linux and macOS.
  319. # Possible values:
  320. # - AUTO: auto set according to system information and feasibility
  321. # - ON: enable libbacktrace
  322. # - OFF: disable libbacktrace
  323. set(USE_LIBBACKTRACE AUTO)
  324.  
  325. # Whether to build static libtvm_runtime.a, the default is to build the dynamic
  326. # version: libtvm_runtime.so.
  327. #
  328. # The static runtime library needs to be linked into executables with the linker
  329. # option --whole-archive (or its equivalent). The reason is that the TVM registry
  330. # mechanism relies on global constructors being executed at program startup.
  331. # Global constructors alone are not sufficient for the linker to consider a
  332. # library member to be used, and some of such library members (object files) may
  333. # not be included in the final executable. This would make the corresponding
  334. # runtime functions to be unavailable to the program.
  335. set(BUILD_STATIC_RUNTIME OFF)
  336.  
  337. # Whether to enable PAPI support in profiling. PAPI provides access to hardware
  338. # counters while profiling.
  339. # Possible values:
  340. # - ON: enable PAPI support. Will search PKG_CONFIG_PATH for a papi.pc
  341. # - OFF: disable PAPI support.
  342. # - /path/to/folder/containing/: Path to folder containing papi.pc.
  343. set(USE_PAPI OFF)
  344.  
  345. # Whether to use GoogleTest for C++ unit tests. When enabled, the generated
  346. # build file (e.g. Makefile) will have a target "cpptest".
  347. # Possible values:
  348. # - ON: enable GoogleTest. The package `GTest` will be required for cmake
  349. #   to succeed.
  350. # - OFF: disable GoogleTest.
  351. # - AUTO: cmake will attempt to find the GTest package, if found GTest will
  352. #   be enabled, otherwise it will be disabled.
  353. # Note that cmake will use `find_package` to find GTest. Please use cmake's
  354. # predefined variables to specify the path to the GTest package if needed.
  355. set(USE_GTEST AUTO)
  356.  
  357. # Enable using CUTLASS as a BYOC backend
  358. # Need to have USE_CUDA=ON
  359. set(USE_CUTLASS OFF)
  360.  
  361. # Enable to show a summary of TVM options
  362. set(SUMMARIZE OFF)
  363.  
  364. # Whether to use LibTorch as backend
  365. # To enable pass the path to the root libtorch (or PyTorch) directory
  366. # OFF or /path/to/torch/
  367. set(USE_LIBTORCH OFF)
Advertisement
Add Comment
Please, Sign In to add comment