From 34dc4692c5f8a59eab278483a66fe639a4b0ecbc Mon Sep 17 00:00:00 2001
From: Ilya Lavrenov
Date: Tue, 31 Dec 2024 15:53:30 +0400
Subject: [PATCH] Fixed typo (#1458)

---
 src/cpp/src/continuous_batching_impl.cpp | 2 +-
 src/cpp/src/lm_encoding.cpp              | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/cpp/src/continuous_batching_impl.cpp b/src/cpp/src/continuous_batching_impl.cpp
index 15c0e69d58..7b076504d0 100644
--- a/src/cpp/src/continuous_batching_impl.cpp
+++ b/src/cpp/src/continuous_batching_impl.cpp
@@ -195,7 +195,7 @@ void ContinuousBatchingPipeline::ContinuousBatchingImpl::step() {
     step_count++;
 #endif
 
-    // process generation_config.echo parameetr
+    // process generation_config.echo parameter
     _fill_prompt_log_probs(m_requests, logits);
 
     SamplerOutput sampler_output;
diff --git a/src/cpp/src/lm_encoding.cpp b/src/cpp/src/lm_encoding.cpp
index 083c591927..9ef876d8aa 100644
--- a/src/cpp/src/lm_encoding.cpp
+++ b/src/cpp/src/lm_encoding.cpp
@@ -119,7 +119,7 @@ std::pair> get_lm_encoded_results(
 
     auto logits = m_llm.get_tensor("logits");
 
-    // since we have applied `Slice` operationto last MatMul, model output sequence lenght is 1
+    // since we have applied `Slice` operation to last MatMul, model output sequence lenght is 1
     // so, we need to update sequence groups to think that they already have processed all prompt tokens except last ones
     // and schedule only `output_sequence_len` ones
     int64_t output_sequence_len = logits.get_shape().at(1);