diff --git a/nemoguardrails/actions/v2_x/generation.py b/nemoguardrails/actions/v2_x/generation.py
index 9e0d74f95..c7bcba8f0 100644
--- a/nemoguardrails/actions/v2_x/generation.py
+++ b/nemoguardrails/actions/v2_x/generation.py
@@ -763,16 +763,21 @@ async def generate_value(
                 if "GenerateValueAction" not in result.text:
                     examples += f"{result.text}\n\n"

-        llm_call_info_var.set(
-            LLMCallInfo(task=Task.GENERATE_VALUE_FROM_INSTRUCTION.value)
+        out_variables: dict[str, Any] = {}
+        rendered_instructions = self.llm_task_manager._render_string(
+            instructions,
+            out_variables=out_variables,
         )
+        task = out_variables.get("template", Task.GENERATE_VALUE_FROM_INSTRUCTION)
+        llm_call_info_var.set(LLMCallInfo(task=task))
+
         prompt = self.llm_task_manager.render_task_prompt(
-            task=Task.GENERATE_VALUE_FROM_INSTRUCTION,
+            task=task,
             events=events,
             context={
                 "examples": examples,
-                "instructions": instructions,
+                "instructions": rendered_instructions,
                 "var_name": var_name if var_name else "result",
                 "context": state.context,
             },
@@ -869,16 +874,20 @@ async def generate_flow(
             render_context["tool_names"] = ", ".join(tool_names)

         # TODO: add the context of the flow
+        out_variables = {}
         flow_nld = self.llm_task_manager._render_string(
-            textwrap.dedent(docstring), context=render_context, events=events
+            textwrap.dedent(docstring),
+            context=render_context,
+            events=events,
+            out_variables=out_variables,
         )

-        llm_call_info_var.set(
-            LLMCallInfo(task=Task.GENERATE_FLOW_CONTINUATION_FROM_NLD.value)
-        )
+        task = out_variables.get("template", Task.GENERATE_FLOW_CONTINUATION_FROM_NLD)
+
+        llm_call_info_var.set(LLMCallInfo(task=task))

         prompt = self.llm_task_manager.render_task_prompt(
-            task=Task.GENERATE_FLOW_CONTINUATION_FROM_NLD,
+            task=task,
             events=events,
             context={
                 "flow_nld": flow_nld,
diff --git a/nemoguardrails/llm/taskmanager.py b/nemoguardrails/llm/taskmanager.py
index b1304c258..dea4dc3d6 100644
--- a/nemoguardrails/llm/taskmanager.py
+++ b/nemoguardrails/llm/taskmanager.py
@@ -109,12 +109,14 @@ def _render_string(
         template_str: str,
         context: Optional[dict] = None,
         events: Optional[List[dict]] = None,
+        out_variables: Optional[dict] = None,
     ) -> str:
         """Render a template using the provided context information.

         :param template_str: The template to render.
         :param context: The context for rendering the prompt.
         :param events: The history of events so far.
+        :param out_variables: If not None, the dict will be populated with the variables set in the template.
         :return: The rendered template.
         :rtype: str.
""" @@ -152,7 +154,15 @@ def _render_string( render_context[variable] = value - return template.render(render_context) + rendered = template.render(render_context) + + if out_variables is not None: + mod = template.module + template_vars = { + n: getattr(mod, n) for n in dir(mod) if not n.startswith("_") + } + out_variables.update(template_vars) + return rendered def _render_messages( self, diff --git a/tests/utils.py b/tests/utils.py index f04b86fe6..fd993628a 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -25,6 +25,7 @@ CallbackManagerForLLMRun, ) from langchain_core.language_models.llms import LLM +from pydantic import Field from nemoguardrails import LLMRails, RailsConfig from nemoguardrails.colang import parse_colang_file @@ -40,6 +41,7 @@ class FakeLLM(LLM): """Fake LLM wrapper for testing purposes.""" responses: List + prompt_history: List[str] = Field(default_factory=list, exclude=True) i: int = 0 streaming: bool = False @@ -60,7 +62,7 @@ def _call( f"No responses available for query number {self.i + 1} in FakeLLM. " "Most likely, too many LLM calls are made or additional responses need to be provided." ) - + self.prompt_history.append(prompt) response = self.responses[self.i] self.i += 1 return response @@ -77,7 +79,7 @@ async def _acall( f"No responses available for query number {self.i + 1} in FakeLLM. " "Most likely, too many LLM calls are made or additional responses need to be provided." ) - + self.prompt_history.append(prompt) response = self.responses[self.i] self.i += 1 diff --git a/tests/v2_x/test_llm_template_selection.py b/tests/v2_x/test_llm_template_selection.py new file mode 100644 index 000000000..de48bfb59 --- /dev/null +++ b/tests/v2_x/test_llm_template_selection.py @@ -0,0 +1,122 @@ +# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging + +from rich.logging import RichHandler + +from nemoguardrails import RailsConfig +from tests.utils import TestChat + +FORMAT = "%(message)s" +logging.basicConfig( + level=logging.DEBUG, + format=FORMAT, + datefmt="[%X,%f]", + handlers=[RichHandler(markup=True)], +) + +config_1 = """ +colang_version: "2.x" + +models: + - type: main + engine: openai + model: gpt-3.5-turbo-instruct + +prompts: + - task: generate_antonym + models: + - openai/gpt-3.5-turbo + - openai/gpt-4 + messages: + - type: user + content: |- + Generate the antonym of the bot expression below. Use the syntax: bot say "". + - type: user + content: |- + YOUR TASK: + {{ flow_nld }} + + - task: repeat + models: + - openai/gpt-3.5-turbo + - openai/gpt-4 + messages: + - type: system + content: | + Your are a value generation bot that needs to generate a value for the ${{ var_name }} variable based on instructions form the user. + Be very precised and always pick the most suitable variable type (e.g. double quotes for strings). Only generated the value and do not provide any additional response. 
+      - type: user
+        content: |
+          {{ instructions }} three times
+          Assign the generated value to:
+          ${{ var_name }} =
+
+"""
+
+
+def test_template_choice_in_value_generation():
+    """Test template selection in value generation."""
+    config = RailsConfig.from_content(
+        colang_content="""
+        flow main
+          match UtteranceUserActionFinished(final_transcript="hi")
+          $test = ..."a random bird name{{% set template = 'repeat' %}}"
+          await UtteranceBotAction(script=$test)
+        """,
+        yaml_content=config_1,
+    )
+
+    chat = TestChat(
+        config,
+        llm_completions=["'parrot, raven, peacock'"],
+    )
+
+    expected_prompt = "System: You are a value generation bot that needs to generate a value for the $test variable based on instructions from the user.\nBe very precise and always pick the most suitable variable type (e.g. double quotes for strings). Only generate the value and do not provide any additional response.\nHuman: a random bird name three times\nAssign the generated value to:\n$test ="
+
+    chat >> "hi"
+    chat << "parrot, raven, peacock"
+    assert chat.llm.prompt_history[0] == expected_prompt
+
+
+def test_template_choice_in_flow_generation():
+    """Test template selection in flow generation."""
+    config = RailsConfig.from_content(
+        colang_content="""
+        import core
+        flow generate antonym
+          \"\"\"
+          {% set template = "generate_antonym" %}
+          bot say "lucky"
+          \"\"\"
+          ...
+
+        flow main
+          match UtteranceUserActionFinished(final_transcript="hi")
+          generate antonym
+        """,
+        yaml_content=config_1,
+    )
+
+    chat = TestChat(
+        config,
+        llm_completions=["bot say 'unfortunate'"],
+    )
+
+    expected_prompt = 'Human: Generate the antonym of the bot expression below. Use the syntax: bot say "".\nHuman: YOUR TASK:\n\n\nbot say "lucky"'
+
+    chat >> "hi"
+    chat << "unfortunate"
+    assert chat.llm.prompt_history[0] == expected_prompt
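
Note on the mechanism (illustrative, not part of the diff): the taskmanager change relies on standard Jinja2 behavior, where a top-level {% set %} assignment is exported as an attribute of template.module, while contributing nothing to the rendered text. A minimal standalone sketch of just that behavior, using plain Jinja2 with hypothetical variable names, independent of NeMo Guardrails:

    from jinja2 import Environment

    env = Environment()
    template = env.from_string("a random bird name{% set template = 'repeat' %}")

    # The {% set %} block produces no output text...
    rendered = template.render({})
    assert rendered == "a random bird name"

    # ...but accessing template.module executes the template's top level and
    # exposes the assignment as a module attribute. This is how _render_string
    # can hand a "template" hint back to its caller via out_variables.
    mod = template.module
    out_variables = {n: getattr(mod, n) for n in dir(mod) if not n.startswith("_")}
    assert out_variables.get("template") == "repeat"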