Added ability to use chat template for GenAI
AlexKoff88 committed Jan 2, 2025
1 parent 1d1fd96 commit daf213d
Showing 2 changed files with 7 additions and 2 deletions.
1 change: 0 additions & 1 deletion tools/who_what_benchmark/whowhatbench/text_evaluator.py
@@ -212,7 +212,6 @@ def default_gen_answer(model, tokenizer, prompt, max_new_tokens, crop_question,
         if crop_question:
             tokens = tokens[:, inputs.shape[-1]:]
         res = self.tokenizer.decode(tokens[0], skip_special_tokens=True)
-        print(res)
         return res
     else:
         inputs = self.tokenizer(prompt, return_tensors="pt")
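
For context, the crop_question branch above slices off the prompt tokens so that only the newly generated text is decoded. A minimal standalone sketch of that logic, assuming Hugging Face transformers and an illustrative gpt2 checkpoint (neither is prescribed by this commit):

    from transformers import AutoTokenizer, AutoModelForCausalLM

    tokenizer = AutoTokenizer.from_pretrained("gpt2")
    model = AutoModelForCausalLM.from_pretrained("gpt2")

    # Encode the question and generate greedily, as the benchmark does.
    inputs = tokenizer("What is OpenVINO?", return_tensors="pt").input_ids
    tokens = model.generate(inputs, do_sample=False, max_new_tokens=32)

    # crop_question: drop the prompt tokens so only the answer is decoded.
    tokens = tokens[:, inputs.shape[-1]:]
    res = tokenizer.decode(tokens[0], skip_special_tokens=True)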
8 changes: 7 additions & 1 deletion tools/who_what_benchmark/whowhatbench/wwb.py
@@ -261,7 +261,13 @@ def diff_strings(a: str, b: str, *, use_loguru_colors: bool = False) -> str:
 
 
 def genai_gen_text(model, tokenizer, question, max_new_tokens, skip_question, use_chat_template=False):
-    return model.generate(question, do_sample=False, max_new_tokens=max_new_tokens)
+    if use_chat_template:
+        model.start_chat()
+        result = model.generate(question, do_sample=False, max_new_tokens=max_new_tokens)
+        model.finish_chat()
+        return result
+    else:
+        return model.generate(question, do_sample=False, max_new_tokens=max_new_tokens)
 
 
 def llamacpp_gen_text(model, tokenizer, question, max_new_tokens, skip_question, use_chat_template=False):
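
For context, a minimal sketch of how the new chat-template path is exercised end to end. The start_chat/finish_chat/generate calls mirror the diff above; the openvino_genai pipeline constructor arguments (model directory, device) are illustrative assumptions, not part of this commit:

    import openvino_genai

    # Hypothetical directory containing a model exported for OpenVINO GenAI.
    pipe = openvino_genai.LLMPipeline("./model_dir", "CPU")

    # With use_chat_template=True, wwb wraps generation in a chat session,
    # so the model's chat template is applied to the prompt.
    pipe.start_chat()
    answer = pipe.generate("What is OpenVINO?", do_sample=False, max_new_tokens=128)
    pipe.finish_chat()
    print(answer)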
