config.toml

# Comment to include at the beginning of the final report.
comment = ""

[server]
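# OpenAI-compatible endpoint; port 11434 is the Ollama default.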
url = "http://localhost:11434/v1"
api_key = "api key"
model = "Qwen2.5-32B-Instruct-32k-Q3_K_S:latest"
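# Request timeout, presumably in seconds (600.0 = 10 minutes).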
timeout = 600.0

[inference]
# Settings below are from evaluate_from_local.py for vLLM on TIGER-AI-Lab/MMLU-Pro.
temperature = 0.0
top_p = 1.0 # not specified, but the vLLM default
max_tokens = 2048
# The variable {subject} is replaced with the appropriate value at runtime.
system_prompt = "The following are multiple choice questions (with answers) about {subject}. Think step by step and then finish your answer with \"the answer is (X)\" where X is the correct letter choice."
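# e.g. with subject = "biology" the rendered prompt begins: "The following are
# multiple choice questions (with answers) about biology. Think step by step ..."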
  17. # "multi_chat" inserts COT examples into multi-turn messages. Use for instruct/chat models.
  18. # "no_chat" uses v1/completion api. Use for non-instruct/chat model.
  19. # "single_chat" (from the script for GPT-4O) inserts all the COT examples and question into a single message. Not recommended, use only for legacy compatibility.
  20. style = "multi_chat"
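# Rough shape of each style (a sketch, assuming OpenAI-style message lists):
#   multi_chat:  [system] + (user: example question, assistant: CoT answer) x N + (user: test question)
#   single_chat: [system] + (user: all CoT examples plus the test question in one message)
#   no_chat:     a single flat text prompt (instructions + CoT examples + question) sent to v1/completions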

[test]
categories = ['biology', 'business', 'chemistry', 'computer science', 'economics', 'engineering', 'health', 'history', 'law', 'math', 'philosophy', 'physics', 'psychology', 'other']
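# Number of requests processed concurrently; presumably 1 means sequential.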
parallel = 1

[log]
# Verbosity level, from 0 to 2.
verbosity = 0
# If true, logs the exact prompt sent to the model in the test result files.
log_prompt = true
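
# Minimal loading sketch (hypothetical; the runner script is not part of this file):
#   import tomllib, openai
#   with open("config.toml", "rb") as f:
#       cfg = tomllib.load(f)
#   client = openai.OpenAI(base_url=cfg["server"]["url"],
#                          api_key=cfg["server"]["api_key"],
#                          timeout=cfg["server"]["timeout"])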