llama_model_loader: Dumping metadata keys/values. Note: KV overrides do not apply in this output.
llama_model_loader: - kv 0: general.architecture str = command-r
llama_model_loader: - kv 1: general.name str = c4ai-command-r-v01
llama_model_loader: - kv 2: command-r.block_count u32 = 40
llama_model_loader: - kv 3: command-r.context_length u32 = 131072
llama_model_loader: - kv 4: command-r.embedding_length u32 = 8192
llama_model_loader: - kv 5: command-r.feed_forward_length u32 = 22528
llama_model_loader: - kv 6: command-r.attention.head_count u32 = 64
llama_model_loader: - kv 7: command-r.attention.head_count_kv u32 = 64
llama_model_loader: - kv 8: command-r.rope.freq_base f32 = 8000000.000000
llama_model_loader: - kv 9: command-r.attention.layer_norm_epsilon f32 = 0.000010
llama_model_loader: - kv 10: general.file_type u32 = 11
llama_model_loader: - kv 11: command-r.logit_scale f32 = 0.062500
llama_model_loader: - kv 12: command-r.rope.scaling.type str = none
llama_model_loader: - kv 13: tokenizer.ggml.model str = gpt2
llama_model_loader: - kv 14: tokenizer.ggml.tokens arr[str,256000] = ["<PAD>", "<UNK>", "<CLS>", "<SEP>", ...
llama_model_loader: - kv 15: tokenizer.ggml.token_type arr[i32,256000] = [3, 3, 3, 3, 3, 3, 3, 3, 1, 1, 1, 1, ...
llama_model_loader: - kv 16: tokenizer.ggml.merges arr[str,253333] = ["Ġ Ġ", "Ġ t", "e r", "i n", "Ġ a...
llama_model_loader: - kv 17: tokenizer.ggml.bos_token_id u32 = 5
llama_model_loader: - kv 18: tokenizer.ggml.eos_token_id u32 = 255001
llama_model_loader: - kv 19: tokenizer.ggml.padding_token_id u32 = 0
llama_model_loader: - kv 20: tokenizer.ggml.add_bos_token bool = true
llama_model_loader: - kv 21: tokenizer.ggml.add_eos_token bool = false
llama_model_loader: - kv 22: general.quantization_version u32 = 2
llama_model_loader: - type f32: 41 tensors
llama_model_loader: - type q3_K: 280 tensors
llama_model_loader: - type q6_K: 1 tensors
llm_load_vocab: special tokens definition check successful ( 1008/256000 ).
llm_load_print_meta: format = GGUF V3 (latest)
llm_load_print_meta: arch = command-r
llm_load_print_meta: vocab type = BPE
llm_load_print_meta: n_vocab = 256000
llm_load_print_meta: n_merges = 253333
llm_load_print_meta: n_ctx_train = 131072
llm_load_print_meta: n_embd = 8192
llm_load_print_meta: n_head = 64
llm_load_print_meta: n_head_kv = 64
llm_load_print_meta: n_layer = 40
llm_load_print_meta: n_rot = 128
llm_load_print_meta: n_embd_head_k = 128
llm_load_print_meta: n_embd_head_v = 128
llm_load_print_meta: n_gqa = 1
llm_load_print_meta: n_embd_k_gqa = 8192
llm_load_print_meta: n_embd_v_gqa = 8192
llm_load_print_meta: f_norm_eps = 1.0e-05
llm_load_print_meta: f_norm_rms_eps = 0.0e+00
llm_load_print_meta: f_clamp_kqv = 0.0e+00
llm_load_print_meta: f_max_alibi_bias = 0.0e+00
llm_load_print_meta: f_logit_scale = 6.2e-02
llm_load_print_meta: n_ff = 22528
llm_load_print_meta: n_expert = 0
llm_load_print_meta: n_expert_used = 0
llm_load_print_meta: causal attn = 1
llm_load_print_meta: pooling type = 0
llm_load_print_meta: rope type = 0
llm_load_print_meta: rope scaling = none
llm_load_print_meta: freq_base_train = 8000000.0
llm_load_print_meta: freq_scale_train = 1
llm_load_print_meta: n_yarn_orig_ctx = 131072
llm_load_print_meta: rope_finetuned = unknown
llm_load_print_meta: ssm_d_conv = 0
llm_load_print_meta: ssm_d_inner = 0
llm_load_print_meta: ssm_d_state = 0
llm_load_print_meta: ssm_dt_rank = 0
llm_load_print_meta: model type = 35B
llm_load_print_meta: model ftype = Q3_K - Small
llm_load_print_meta: model params = 34.98 B
llm_load_print_meta: model size = 14.76 GiB (3.63 BPW)
llm_load_print_meta: general.name = c4ai-command-r-v01
llm_load_print_meta: BOS token = 5 '<BOS_TOKEN>'
llm_load_print_meta: EOS token = 255001 '<|END_OF_TURN_TOKEN|>'
llm_load_print_meta: PAD token = 0 '<PAD>'
llm_load_print_meta: LF token = 136 'Ä'
llm_load_tensors: ggml ctx size = 0.49 MiB
llm_load_tensors: offloading 36 repeating layers to GPU
llm_load_tensors: offloaded 36/41 layers to GPU
llm_load_tensors: CPU buffer size = 15116.91 MiB
llm_load_tensors: CUDA0 buffer size = 3705.97 MiB
llm_load_tensors: CUDA1 buffer size = 3369.06 MiB
llm_load_tensors: CUDA2 buffer size = 5053.59 MiB
...................................................................................
llama_new_context_with_model: n_ctx = 2048
llama_new_context_with_model: n_batch = 512
llama_new_context_with_model: n_ubatch = 512
llama_new_context_with_model: freq_base = 8000000.0
llama_new_context_with_model: freq_scale = 1
llama_kv_cache_init: CUDA_Host KV buffer size = 256.00 MiB
llama_kv_cache_init: CUDA0 KV buffer size = 704.00 MiB
llama_kv_cache_init: CUDA1 KV buffer size = 640.00 MiB
llama_kv_cache_init: CUDA2 KV buffer size = 960.00 MiB
llama_new_context_with_model: KV self size = 2560.00 MiB, K (f16): 1280.00 MiB, V (f16): 1280.00 MiB
llama_new_context_with_model: CUDA_Host output buffer size = 516.00 MiB
llama_new_context_with_model: CUDA0 compute buffer size = 2156.62 MiB
llama_new_context_with_model: CUDA1 compute buffer size = 324.00 MiB
llama_new_context_with_model: CUDA2 compute buffer size = 324.00 MiB
llama_new_context_with_model: CUDA_Host compute buffer size = 36.00 MiB
llama_new_context_with_model: graph nodes = 1245
llama_new_context_with_model: graph splits = 46
{"function":"initialize","level":"INFO","line":444,"msg":"initializing slots","n_slots":1,"tid":"21568","timestamp":1712661638}
{"function":"initialize","level":"INFO","line":456,"msg":"new slot","n_ctx_slot":2048,"slot_id":0,"tid":"21568","timestamp":1712661638}
time=2024-04-09T16:50:38.720+05:30 level=INFO source=dyn_ext_server.go:159 msg="Starting llama main loop"
{"function":"update_slots","level":"INFO","line":1574,"msg":"all slots are idle and system prompt is empty, clear the KV cache","tid":"27556","timestamp":1712661638}
{"function":"launch_slot_with_data","level":"INFO","line":829,"msg":"slot is processing task","slot_id":0,"task_id":0,"tid":"27556","timestamp":1712661638}
{"function":"update_slots","ga_i":0,"level":"INFO","line":1812,"msg":"slot progression","n_past":0,"n_past_se":0,"n_prompt_tokens_processed":77,"slot_id":0,"task_id":0,"tid":"27556","timestamp":1712661638}
{"function":"update_slots","level":"INFO","line":1836,"msg":"kv cache rm [p0, end)","p0":0,"slot_id":0,"task_id":0,"tid":"27556","timestamp":1712661638}
CUDA error: out of memory
current device: 0, in function alloc at C:\a\ollama\ollama\llm\llama.cpp\ggml-cuda.cu:532
cuMemSetAccess(pool_addr + pool_size, reserve_size, &access, 1)
GGML_ASSERT: C:\a\ollama\ollama\llm\llama.cpp\ggml-cuda.cu:193: !"CUDA error"