Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
diff --git a/modules/text_generation.py b/modules/text_generation.py
index 00b7cc7..3637dc9 100644
--- a/modules/text_generation.py
+++ b/modules/text_generation.py
@@ -237,6 +237,7 @@ def generate_reply_HF(question, original_question, seed, state, eos_token=None,
     # Update generate_params with the eos token and the stopping strings
     generate_params['eos_token_id'] = eos_token_ids
     generate_params['stopping_criteria'] = stopping_criteria_list
+    generate_params['decoder_input_ids'] = input_ids.clone()

     t0 = time.time()
     try:
@@ -244,6 +245,7 @@ def generate_reply_HF(question, original_question, seed, state, eos_token=None,
         yield ''

     # Generate the entire reply at once.
+    print(f">> GEN: {generate_params}")
     if not state['stream']:
         with torch.no_grad():
             output = shared.model.generate(**generate_params)[0]
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement