Lissanro

Nemotron-Ultra-253B in ik_llama.cpp triggers "CUDA error: an illegal memory access was encountered"

May 5th, 2025
~/pkgs/ik_llama.cpp/build/bin/llama-server \
--model /mnt/secondary/neuro/Llama-3_1-Nemotron-Ultra-253B-v1-GGUF-UD-Q4_K_XL-131072seq/Llama-3_1-Nemotron-Ultra-253B-v1-UD-Q4_K_XL-00001-of-00004.gguf \
--ctx-size 81920 --n-gpu-layers 36 --tensor-split 25,25,25,25 \
-fa -ctk q8_0 -ctv q8_0 --threads 64 --host 0.0.0.0 --port 5000 -fmoe
INFO [ main] build info | tid="132669687377920" timestamp=1746483438 build=3667 commit="e3fec173"
INFO [ main] system info | tid="132669687377920" timestamp=1746483438 n_threads=64 n_threads_batch=-1 total_threads=128 system_info="AVX = 1 | AVX_VNNI = 0 | AVX2 = 1 | AVX512 = 0 | AVX512_VBMI = 0 | AVX512_VNNI = 0 | AVX512_BF16 = 0 | FMA = 1 | NEON = 0 | SVE = 0 | ARM_FMA = 0 | F16C = 1 | FP16_VA = 0 | WASM_SIMD = 0 | BLAS = 1 | SSE3 = 1 | SSSE3 = 1 | VSX = 0 | MATMUL_INT8 = 0 | LLAMAFILE = 1 | "
llama_model_loader: additional 3 GGUFs metadata loaded.
llama_model_loader: loaded meta data with 43 key-value pairs and 648 tensors from /mnt/secondary/neuro/Llama-3_1-Nemotron-Ultra-253B-v1-GGUF-UD-Q4_K_XL-131072seq/Llama-3_1-Nemotron-Ultra-253B-v1-UD-Q4_K_XL-00001-of-00004.gguf (version GGUF V3 (latest))
llama_model_loader: Dumping metadata keys/values. Note: KV overrides do not apply in this output.
llama_model_loader: - kv 0: general.architecture str = deci
llama_model_loader: - kv 1: general.type str = model
llama_model_loader: - kv 2: general.name str = Llama_Nemotron_Ultra
llama_model_loader: - kv 3: general.version str = v1
llama_model_loader: - kv 4: general.finetune str = 3_1-Nemotron-Ultra
llama_model_loader: - kv 5: general.basename str = Llama-3_1-Nemotron-Ultra-253B-V1
llama_model_loader: - kv 6: general.quantized_by str = Unsloth
llama_model_loader: - kv 7: general.size_label str = 253B
llama_model_loader: - kv 8: general.license str = other
llama_model_loader: - kv 9: general.license.name str = nvidia-open-model-license
llama_model_loader: - kv 10: general.license.link str = https://www.nvidia.com/en-us/agreemen...
llama_model_loader: - kv 11: general.repo_url str = https://huggingface.co/unsloth
llama_model_loader: - kv 12: general.tags arr[str,4] = ["nvidia", "llama-3", "pytorch", "tex...
llama_model_loader: - kv 13: general.languages arr[str,1] = ["en"]
llama_model_loader: - kv 14: deci.rope.freq_base f32 = 500000.000000
llama_model_loader: - kv 15: deci.attention.head_count_kv arr[i32,162] = [8, 8, 8, 8, 8, 8, 8, 8, 8, 0, 0, 0, ...
llama_model_loader: - kv 16: deci.attention.head_count arr[i32,162] = [128, 128, 128, 128, 128, 128, 128, 1...
llama_model_loader: - kv 17: deci.feed_forward_length arr[i32,162] = [5376, 10752, 16128, 16128, 16128, 16...
llama_model_loader: - kv 18: deci.block_count u32 = 162
llama_model_loader: - kv 19: deci.context_length u32 = 131072
llama_model_loader: - kv 20: deci.embedding_length u32 = 16384
llama_model_loader: - kv 21: deci.attention.layer_norm_rms_epsilon f32 = 0.000010
llama_model_loader: - kv 22: deci.attention.key_length u32 = 128
llama_model_loader: - kv 23: deci.attention.value_length u32 = 128
llama_model_loader: - kv 24: deci.vocab_size u32 = 128256
llama_model_loader: - kv 25: deci.rope.dimension_count u32 = 128
llama_model_loader: - kv 26: tokenizer.ggml.model str = gpt2
llama_model_loader: - kv 27: tokenizer.ggml.pre str = llama-bpe
llama_model_loader: - kv 28: tokenizer.ggml.tokens arr[str,128256] = ["!", "\"", "#", "$", "%", "&", "'", ...
llama_model_loader: - kv 29: tokenizer.ggml.token_type arr[i32,128256] = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, ...
llama_model_loader: - kv 30: tokenizer.ggml.merges arr[str,280147] = ["Ġ Ġ", "Ġ ĠĠĠ", "ĠĠ ĠĠ", "...
llama_model_loader: - kv 31: tokenizer.ggml.bos_token_id u32 = 128000
llama_model_loader: - kv 32: tokenizer.ggml.eos_token_id u32 = 128009
llama_model_loader: - kv 33: tokenizer.chat_template str = {{- bos_token }}{%- if messages[0]['r...
llama_model_loader: - kv 34: general.quantization_version u32 = 2
llama_model_loader: - kv 35: general.file_type u32 = 15
llama_model_loader: - kv 36: quantize.imatrix.file str = Llama-3_1-Nemotron-Ultra-253B-v1-GGUF...
llama_model_loader: - kv 37: quantize.imatrix.dataset str = unsloth_calibration_Llama-3_1-Nemotro...
llama_model_loader: - kv 38: quantize.imatrix.entries_count i32 = 499
llama_model_loader: - kv 39: quantize.imatrix.chunks_count i32 = 544
llama_model_loader: - kv 40: split.no u16 = 0
llama_model_loader: - kv 41: split.tensors.count i32 = 648
llama_model_loader: - kv 42: split.count u16 = 4
llama_model_loader: - type f32: 147 tensors
llama_model_loader: - type q4_K: 428 tensors
llama_model_loader: - type q6_K: 73 tensors
llm_load_vocab: special tokens cache size = 256
llm_load_vocab: token to piece cache size = 0.7999 MB
llm_load_print_meta: format = GGUF V3 (latest)
llm_load_print_meta: arch = deci
llm_load_print_meta: vocab type = BPE
llm_load_print_meta: n_vocab = 128256
llm_load_print_meta: n_merges = 280147
llm_load_print_meta: vocab_only = 0
llm_load_print_meta: n_ctx_train = 131072
llm_load_print_meta: n_embd = 16384
llm_load_print_meta: n_layer = 162
llm_load_print_meta: n_head = [128, 128, 128, 128, 128, 128, 128, 128, 128, 0, 0, 0, 0, 128, 128, 128, 128, 128, 0, 0, 0, 0, 0, 0, 128, 128, 128, 0, 0, 0, 0, 0, 128, 128, 128, 128, 0, 0, 0, 128, 128, 128, 0, 128, 0, 0, 0, 0, 0, 0, 128, 128, 128, 128, 0, 0, 0, 0, 0, 128, 128, 128, 128, 0, 0, 0, 0, 0, 128, 128, 128, 128, 0, 0, 0, 0, 0, 128, 128, 128, 128, 0, 0, 0, 0, 0, 128, 128, 128, 128, 0, 0, 128, 128, 128, 128, 0, 0, 128, 0, 0, 0, 0, 0, 0, 0, 0, 128, 0, 0, 0, 0, 0, 128, 128, 0, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 128, 128, 0, 128, 128, 128, 128, 128, 128, 128, 128]
llm_load_print_meta: n_head_kv = [8, 8, 8, 8, 8, 8, 8, 8, 8, 0, 0, 0, 0, 8, 8, 8, 8, 8, 0, 0, 0, 0, 0, 0, 8, 8, 8, 0, 0, 0, 0, 0, 8, 8, 8, 8, 0, 0, 0, 8, 8, 8, 0, 8, 0, 0, 0, 0, 0, 0, 8, 8, 8, 8, 0, 0, 0, 0, 0, 8, 8, 8, 8, 0, 0, 0, 0, 0, 8, 8, 8, 8, 0, 0, 0, 0, 0, 8, 8, 8, 8, 0, 0, 0, 0, 0, 8, 8, 8, 8, 0, 0, 8, 8, 8, 8, 0, 0, 8, 0, 0, 0, 0, 0, 0, 0, 0, 8, 0, 0, 0, 0, 0, 8, 8, 0, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, 8, 0, 8, 8, 8, 8, 8, 8, 8, 8]
llm_load_print_meta: n_rot = 128
llm_load_print_meta: n_swa = 0
llm_load_print_meta: n_swa_pattern = 1
llm_load_print_meta: n_embd_head_k = 128
llm_load_print_meta: n_embd_head_v = 128
llm_load_print_meta: n_gqa = [16, 16, 16, 16, 16, 16, 16, 16, 16, 0, 0, 0, 0, 16, 16, 16, 16, 16, 0, 0, 0, 0, 0, 0, 16, 16, 16, 0, 0, 0, 0, 0, 16, 16, 16, 16, 0, 0, 0, 16, 16, 16, 0, 16, 0, 0, 0, 0, 0, 0, 16, 16, 16, 16, 0, 0, 0, 0, 0, 16, 16, 16, 16, 0, 0, 0, 0, 0, 16, 16, 16, 16, 0, 0, 0, 0, 0, 16, 16, 16, 16, 0, 0, 0, 0, 0, 16, 16, 16, 16, 0, 0, 16, 16, 16, 16, 0, 0, 16, 0, 0, 0, 0, 0, 0, 0, 0, 16, 0, 0, 0, 0, 0, 16, 16, 0, 16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 16, 16, 0, 16, 16, 16, 16, 16, 16, 16, 16]
llm_load_print_meta: n_embd_k_gqa = [1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 0, 0, 0, 0, 1024, 1024, 1024, 1024, 1024, 0, 0, 0, 0, 0, 0, 1024, 1024, 1024, 0, 0, 0, 0, 0, 1024, 1024, 1024, 1024, 0, 0, 0, 1024, 1024, 1024, 0, 1024, 0, 0, 0, 0, 0, 0, 1024, 1024, 1024, 1024, 0, 0, 0, 0, 0, 1024, 1024, 1024, 1024, 0, 0, 0, 0, 0, 1024, 1024, 1024, 1024, 0, 0, 0, 0, 0, 1024, 1024, 1024, 1024, 0, 0, 0, 0, 0, 1024, 1024, 1024, 1024, 0, 0, 1024, 1024, 1024, 1024, 0, 0, 1024, 0, 0, 0, 0, 0, 0, 0, 0, 1024, 0, 0, 0, 0, 0, 1024, 1024, 0, 1024, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1024, 1024, 0, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024]
llm_load_print_meta: n_embd_v_gqa = [1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 0, 0, 0, 0, 1024, 1024, 1024, 1024, 1024, 0, 0, 0, 0, 0, 0, 1024, 1024, 1024, 0, 0, 0, 0, 0, 1024, 1024, 1024, 1024, 0, 0, 0, 1024, 1024, 1024, 0, 1024, 0, 0, 0, 0, 0, 0, 1024, 1024, 1024, 1024, 0, 0, 0, 0, 0, 1024, 1024, 1024, 1024, 0, 0, 0, 0, 0, 1024, 1024, 1024, 1024, 0, 0, 0, 0, 0, 1024, 1024, 1024, 1024, 0, 0, 0, 0, 0, 1024, 1024, 1024, 1024, 0, 0, 1024, 1024, 1024, 1024, 0, 0, 1024, 0, 0, 0, 0, 0, 0, 0, 0, 1024, 0, 0, 0, 0, 0, 1024, 1024, 0, 1024, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1024, 1024, 0, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024]
llm_load_print_meta: f_norm_eps = 0.0e+00
llm_load_print_meta: f_norm_rms_eps = 1.0e-05
llm_load_print_meta: f_clamp_kqv = 0.0e+00
llm_load_print_meta: f_max_alibi_bias = 0.0e+00
llm_load_print_meta: f_logit_scale = 0.0e+00
llm_load_print_meta: n_ff = [5376, 10752, 16128, 16128, 16128, 16128, 16128, 16128, 21504, 0, 0, 0, 0, 21504, 21504, 21504, 53248, 53248, 0, 0, 0, 0, 0, 0, 53248, 53248, 53248, 0, 0, 0, 0, 0, 53248, 53248, 53248, 26624, 0, 0, 0, 21504, 21504, 21504, 21504, 53248, 53248, 0, 0, 0, 0, 0, 53248, 53248, 53248, 53248, 0, 0, 0, 0, 0, 53248, 53248, 53248, 53248, 0, 0, 0, 0, 0, 53248, 53248, 53248, 53248, 0, 0, 0, 0, 0, 53248, 53248, 53248, 53248, 0, 0, 0, 0, 0, 53248, 37376, 37376, 37376, 0, 0, 32000, 26624, 26624, 26624, 26624, 26624, 26624, 0, 26624, 26624, 26624, 26624, 26624, 26624, 26624, 26624, 0, 0, 0, 0, 0, 32000, 53248, 53248, 53248, 0, 0, 0, 0, 0, 0, 0, 0, 399360, 0, 0, 0, 0, 0, 0, 0, 0, 425984, 0, 0, 0, 0, 0, 0, 0, 0, 343040, 0, 0, 0, 0, 0, 301056, 21504, 21504, 26624, 0, 26624, 26624, 37376, 53248, 53248, 53248, 53248, 26624]
llm_load_print_meta: n_expert = 0
llm_load_print_meta: n_expert_used = 0
llm_load_print_meta: causal attn = 1
llm_load_print_meta: pooling type = 0
llm_load_print_meta: rope type = 0
llm_load_print_meta: rope scaling = linear
llm_load_print_meta: freq_base_train = 500000.0
llm_load_print_meta: freq_scale_train = 1
llm_load_print_meta: n_ctx_orig_yarn = 131072
llm_load_print_meta: rope_finetuned = unknown
llm_load_print_meta: ssm_d_conv = 0
llm_load_print_meta: ssm_d_inner = 0
llm_load_print_meta: ssm_d_state = 0
llm_load_print_meta: ssm_dt_rank = 0
llm_load_print_meta: model type = 405B
llm_load_print_meta: model ftype = Q4_K - Medium
llm_load_print_meta: model params = 253.401 B
llm_load_print_meta: model size = 140.564 GiB (4.765 BPW)
llm_load_print_meta: repeating layers = 137.857 GiB (4.752 BPW, 249.199 B parameters)
llm_load_print_meta: general.name = Llama_Nemotron_Ultra
llm_load_print_meta: BOS token = 128000 '<|begin_of_text|>'
llm_load_print_meta: EOS token = 128009 '<|eot_id|>'
llm_load_print_meta: LF token = 128 'Ä'
llm_load_print_meta: EOT token = 128009 '<|eot_id|>'
llm_load_print_meta: max token length = 256
ggml_cuda_init: GGML_CUDA_FORCE_MMQ: no
ggml_cuda_init: GGML_CUDA_FORCE_CUBLAS: no
ggml_cuda_init: found 4 CUDA devices:
Device 0: NVIDIA GeForce RTX 3090, compute capability 8.6, VMM: yes
Device 1: NVIDIA GeForce RTX 3090, compute capability 8.6, VMM: yes
Device 2: NVIDIA GeForce RTX 3090, compute capability 8.6, VMM: yes
Device 3: NVIDIA GeForce RTX 3090, compute capability 8.6, VMM: yes
llm_load_tensors: ggml ctx size = 1.99 MiB
llm_load_tensors: offloading 36 repeating layers to GPU
llm_load_tensors: offloaded 36/163 layers to GPU
llm_load_tensors: CPU buffer size = 47495.50 MiB
llm_load_tensors: CPU buffer size = 45225.56 MiB
llm_load_tensors: CPU buffer size = 7020.06 MiB
llm_load_tensors: CUDA0 buffer size = 12948.06 MiB
llm_load_tensors: CUDA1 buffer size = 9045.06 MiB
llm_load_tensors: CUDA2 buffer size = 10477.13 MiB
llm_load_tensors: CUDA3 buffer size = 11725.75 MiB
...................................................................................
llama_new_context_with_model: n_ctx = 81920
llama_new_context_with_model: n_batch = 2048
llama_new_context_with_model: n_ubatch = 512
llama_new_context_with_model: flash_attn = 1
llama_new_context_with_model: mla_attn = 0
llama_new_context_with_model: attn_max_b = 0
llama_new_context_with_model: fused_moe = 1
llama_new_context_with_model: ser = -1, 0
llama_new_context_with_model: freq_base = 500000.0
llama_new_context_with_model: freq_scale = 1
llama_kv_cache_init: CUDA_Host KV buffer size = 9180.00 MiB
llama_kv_cache_init: CUDA0 KV buffer size = 0.00 MiB
llama_kv_cache_init: CUDA1 KV buffer size = 0.00 MiB
llama_kv_cache_init: CUDA2 KV buffer size = 340.00 MiB
llama_kv_cache_init: CUDA3 KV buffer size = 1360.00 MiB
llama_new_context_with_model: KV self size = 10880.00 MiB, K (q8_0): 5440.00 MiB, V (q8_0): 5440.00 MiB
llama_new_context_with_model: CUDA_Host output buffer size = 0.98 MiB
llama_new_context_with_model: CUDA0 compute buffer size = 5576.07 MiB
llama_new_context_with_model: CUDA1 compute buffer size = 2074.00 MiB
llama_new_context_with_model: CUDA2 compute buffer size = 1908.00 MiB
llama_new_context_with_model: CUDA3 compute buffer size = 456.00 MiB
llama_new_context_with_model: CUDA_Host compute buffer size = 192.01 MiB
llama_new_context_with_model: graph nodes = 1708
llama_new_context_with_model: graph splits = 760
INFO [ init] initializing slots | tid="132669687377920" timestamp=1746484358 n_slots=1
INFO [ init] new slot | tid="132669687377920" timestamp=1746484358 id_slot=0 n_ctx_slot=81920
INFO [ main] model loaded | tid="132669687377920" timestamp=1746484358
INFO [ main] chat template | tid="132669687377920" timestamp=1746484358 chat_example="<|start_header_id|>system<|end_header_id|>\n\nYou are a helpful assistant<|eot_id|><|start_header_id|>user<|end_header_id|>\n\nHello<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\nHi there<|eot_id|><|start_header_id|>user<|end_header_id|>\n\nHow are you?<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n" built_in=true
INFO [ main] HTTP server listening | tid="132669687377920" timestamp=1746484358 n_threads_http="127" port="5000" hostname="0.0.0.0"
INFO [ update_slots] all slots are idle | tid="132669687377920" timestamp=1746484358
INFO [ log_server_request] request | tid="132548814180352" timestamp=1746485079 remote_addr="127.0.0.1" remote_port=44742 status=200 method="GET" path="/v1/models" params={}
INFO [ log_server_request] request | tid="132548814180352" timestamp=1746485079 remote_addr="127.0.0.1" remote_port=44742 status=200 method="GET" path="/props" params={}
INFO [ launch_slot_with_task] slot is processing task | tid="132669687377920" timestamp=1746485085 id_slot=0 id_task=0
INFO [ update_slots] kv cache rm [p0, end) | tid="132669687377920" timestamp=1746485085 id_slot=0 id_task=0 p0=0
CUDA error: an illegal memory access was encountered
current device: 0, in function ggml_backend_cuda_synchronize at /home/lissanro/pkgs/ik_llama.cpp/ggml/src/ggml-cuda.cu:3054
cudaStreamSynchronize(cuda_ctx->stream())
/home/lissanro/pkgs/ik_llama.cpp/ggml/src/ggml-cuda.cu:110: CUDA error
Could not attach to process. If your uid matches the uid of the target
process, check the setting of /proc/sys/kernel/yama/ptrace_scope, or try
again as the root user. For more details, see /etc/sysctl.d/10-ptrace.conf
ptrace: Operation not permitted.
No stack.
The program is not being run.
zsh: IOT instruction (core dumped) ~/pkgs/ik_llama.cpp/build/bin/llama-server --model --ctx-size 81920 36 -f