move streaming testing to test_sampling.py
pavel-esir committed Jan 16, 2025
1 parent 5832846 commit b73da04
Showing 2 changed files with 21 additions and 29 deletions.
tests/python_tests/test_llm_pipeline.py (23 changes: 1 addition & 22 deletions)
@@ -11,7 +11,7 @@
 from pathlib import Path
 import torch
 
-from common import run_llm_pipeline_with_ref, convert_to_hf, StreamerWithResults
+from common import run_llm_pipeline_with_ref, convert_to_hf
 from ov_genai_test_utils import (
     get_models_list,
     read_model,
@@ -194,27 +194,6 @@ def test_callback_kwargs_one_string(callback):
     pipe.generate('table is made of', max_new_tokens=10, streamer=callback)
 
 
-@pytest.mark.parametrize("streamer", [StreamerWithResults()])
-@pytest.mark.parametrize("prompt", [
-    'table is made of',
-    'The Sun is yellow because',
-    '你好! 你好嗎?',
-    'I have an interview about product speccing with the company Weekend Health. Give me an example of a question they might ask with regards about a new feature'
-])
-@pytest.mark.parametrize("generation_config", [dict(max_new_tokens=10), dict(max_new_tokens=300)])
-@pytest.mark.parametrize("model_descr", get_models_list())
-@pytest.mark.parametrize("use_cb", [True, False])
-@pytest.mark.precommit
-@pytest.mark.nightly
-def test_streamer_compare_texts(model_descr, generation_config, prompt, streamer, use_cb):
-    run_llm_pipeline_with_ref(model_id=model_descr[0],
-                              prompts=[prompt],
-                              generation_config=generation_config,
-                              tmp_path=model_descr[1],
-                              use_cb=use_cb,
-                              streamer=streamer)
-
-
 @pytest.mark.parametrize("callback", [print, user_defined_callback, lambda subword: print(subword)])
 @pytest.mark.precommit
 @pytest.mark.nightly
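The callback-style streamers kept in test_llm_pipeline.py are plain callables that receive decoded subwords, as the surviving context line pipe.generate('table is made of', max_new_tokens=10, streamer=callback) shows. A minimal usage sketch under that protocol, assuming a hypothetical model_dir holding an OpenVINO-converted model:

import openvino_genai

# "model_dir" is a placeholder for any OpenVINO-converted LLM directory.
pipe = openvino_genai.LLMPipeline("model_dir", "CPU")

def user_defined_callback(subword: str):
    # Receives each decoded chunk as generation proceeds.
    print(subword, end="", flush=True)

pipe.generate("table is made of", max_new_tokens=10, streamer=user_defined_callback)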
tests/python_tests/test_sampling.py (27 changes: 20 additions & 7 deletions)
@@ -9,7 +9,7 @@
 from openvino_genai import GenerationConfig, StopCriteria
 from typing import List, TypedDict
 
-from common import get_hugging_face_models, convert_models, run_llm_pipeline_with_ref, run_llm_pipeline
+from common import get_hugging_face_models, convert_models, run_llm_pipeline_with_ref, run_llm_pipeline, StreamerWithResults
 
 
 @pytest.mark.precommit
@@ -58,13 +58,26 @@ def test_stop_strings(tmp_path, generation_config):
 @pytest.mark.precommit
 @pytest.mark.parametrize("generation_config",
                          [dict(max_new_tokens=30),
-                          dict(max_new_tokens=30, repetition_penalty=2.0),],
-                         ids=["basic",
-                              "repetition_penalty",])
-def test_greedy(tmp_path, generation_config):
-    prompts = [ "What is OpenVINO?" ]
+                          dict(max_new_tokens=30, repetition_penalty=2.0),
+                          dict(max_new_tokens=300)],
+                         ids=["basic", "repetition_penalty", "long_max_new_tokens"])
+@pytest.mark.parametrize("streamer", [StreamerWithResults()])
+@pytest.mark.parametrize("prompt", [
+    'What is OpenVINO?',
+    'table is made of',
+    'The Sun is yellow because',
+    '你好! 你好嗎?',
+    'I have an interview about product speccing with the company Weekend Health. Give me an example of a question they might ask with regards about a new feature'
+])
+@pytest.mark.parametrize("use_cb", [True, False])
+def test_greedy(tmp_path, generation_config, prompt, streamer, use_cb):
     model_id : str = "katuni4ka/tiny-random-phi3"
-    run_llm_pipeline_with_ref(model_id, prompts, generation_config, tmp_path)
+    run_llm_pipeline_with_ref(model_id=model_id,
+                              prompts=[prompt],
+                              generation_config=generation_config,
+                              tmp_path=tmp_path,
+                              use_cb=use_cb,
+                              streamer=streamer)
 
 
 @pytest.mark.precommit
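StreamerWithResults is imported from common but not defined in this diff. A minimal sketch of what such a helper could look like, assuming the same callable protocol as the callbacks above; get_result_str and reset are illustrative names, not necessarily the actual common.py API:

from typing import List

class StreamerWithResults:
    # Collects streamed subwords so a test can compare the streamed text
    # against the fully generated text. A sketch, not the real class.

    def __init__(self):
        self.results: List[str] = []

    def __call__(self, subword: str) -> bool:
        self.results.append(subword)
        return False  # returning False keeps generation going

    def get_result_str(self) -> str:
        # Illustrative helper: the concatenated streamed output.
        return "".join(self.results)

    def reset(self):
        # Illustrative helper: clear accumulated text between runs.
        self.results = []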
