
Fixed misprint (#965)
jane-intel authored Oct 14, 2024
1 parent 510fcd3 commit d041d88
Showing 2 changed files with 5 additions and 5 deletions.
llm_bench/python/llm_bench_utils/metrics_print.py (4 changes: 2 additions & 2 deletions)
@@ -153,8 +153,8 @@ def output_avg_statis_tokens(prompt_dict, prompt_idx_list, iter_data_list, batch
     avg_2nd_tokens_latency = 'NA' if avg_2nd_tokens_latency < 0 else f'{avg_2nd_tokens_latency:.2f} ms/{latency_unit}'
     avg_2nd_token_tput = 'NA' if avg_2nd_tokens_latency == 'NA' else f'{avg_2nd_token_tput:.2f} {latency_unit}s/s'
     if is_text_gen is True:
-        prompt_dict[p_idx] = '\n[ INFO ] [Average] Prompt[{}] Input token size: {}, 1st token lantency: {}, ' \
-                             '2nd token lantency: {}, 2nd tokens throughput: {}' \
+        prompt_dict[p_idx] = '\n[ INFO ] [Average] Prompt[{}] Input token size: {}, 1st token latency: {}, ' \
+                             '2nd token latency: {}, 2nd tokens throughput: {}' \
             .format(p_idx, avg_input_size, avg_1st_token_latency, avg_2nd_tokens_latency, avg_2nd_token_tput)
     else:
         prompt_dict[p_idx] = '\n[ INFO ] [Average] Prompt[{}] 1st step of unet latency: {}, ' \
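For context, a minimal standalone sketch of how the corrected "[Average]" info line renders after the fix; the variable values below are illustrative placeholders, not output from the benchmark:

# Minimal sketch, not the repository's full output_avg_statis_tokens; values are illustrative.
p_idx = 0
avg_input_size = 32
avg_1st_token_latency = '85.13 ms/token'
avg_2nd_tokens_latency = '21.47 ms/token'
avg_2nd_token_tput = '46.58 tokens/s'

# Same continuation-string formatting pattern as the corrected code above.
line = '\n[ INFO ] [Average] Prompt[{}] Input token size: {}, 1st token latency: {}, ' \
       '2nd token latency: {}, 2nd tokens throughput: {}' \
    .format(p_idx, avg_input_size, avg_1st_token_latency, avg_2nd_tokens_latency, avg_2nd_token_tput)
print(line)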
llm_bench/python/llm_bench_utils/output_csv.py (6 changes: 3 additions & 3 deletions)
@@ -26,15 +26,15 @@ def output_comments(result, use_case, writer):
         comment_list.append('prompt_idx: Index of prompts')
     elif use_case == 'image_gen':
         comment_list.append("infer_count: Tex2Image models' Inference(or Sampling) step size")
-        comment_list.append('1st_latency: First step lantency of unet')
+        comment_list.append('1st_latency: First step latency of unet')
         comment_list.append('2nd_avg_latency: Other steps latency of unet(exclude first step)')
         comment_list.append('1st_infer_latency: Same as 1st_latency')
         comment_list.append('2nd_infer_avg_latency: Same as 2nd_avg_latency')
         comment_list.append('prompt_idx: Index of prompts')
     elif use_case == 'ldm_super_resolution':
         comment_list.append("infer_count: Tex2Image models' Inference(or Sampling) step size")
-        comment_list.append('1st_latency: First step lantency of unet')
-        comment_list.append('2nd_avg_latency: Other steps lantency of unet(exclude first step)')
+        comment_list.append('1st_latency: First step latency of unet')
+        comment_list.append('2nd_avg_latency: Other steps latency of unet(exclude first step)')
         comment_list.append('1st_infer_latency: Same as 1st_latency')
         comment_list.append('2nd_infer_avg_latency: Same as 2nd_avg_latency')
         comment_list.append('prompt_idx: Image Index')
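The comment rows above end up as explanatory lines in the benchmark's results CSV. A minimal sketch of how such a block could be emitted with Python's csv module; the function name, file path, and writer setup here are assumptions for illustration, not the repository's exact output_csv code:

import csv

def write_image_gen_comments(path):
    # Hypothetical stand-in for output_comments(): appends one explanatory
    # row per metric column of the 'image_gen' use case to the results CSV.
    comments = [
        "infer_count: Tex2Image models' Inference(or Sampling) step size",
        '1st_latency: First step latency of unet',
        '2nd_avg_latency: Other steps latency of unet(exclude first step)',
        '1st_infer_latency: Same as 1st_latency',
        '2nd_infer_avg_latency: Same as 2nd_avg_latency',
        'prompt_idx: Index of prompts',
    ]
    with open(path, 'a', newline='') as f:
        writer = csv.writer(f)
        for comment in comments:
            writer.writerow([comment])

write_image_gen_comments('benchmark_results.csv')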
