From bdd04183654e02052847c00effdd532fd158fe9c Mon Sep 17 00:00:00 2001
From: Li Yin
Date: Tue, 7 Jan 2025 19:31:12 -0800
Subject: [PATCH] fix the bug in setting the pred score
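
Every `pred.set_score(response.score)` call is now guarded so a `None` score is
never forwarded: `Parameter.set_score` raises a ValueError for non-float input.
The snippet below is a minimal sketch of that guard, not part of the diff; the
helper name `propagate_score` is hypothetical and used only for illustration.

    def propagate_score(pred, response):
        # Mirrors the guard added at each call site in this patch:
        # set_score rejects non-float values, so a None score is skipped.
        if response.score is not None:
            pred.set_score(response.score)
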
---
adalflow/adalflow/core/generator.py | 11 +++++++----
adalflow/adalflow/optim/grad_component.py | 13 ++++++++++---
adalflow/adalflow/optim/parameter.py | 4 +++-
.../optim/text_grad/text_loss_with_eval_fn.py | 5 ++++-
.../hotpot_qa/adal_exp/build_multi_hop_rag.py | 2 +-
5 files changed, 25 insertions(+), 10 deletions(-)
diff --git a/adalflow/adalflow/core/generator.py b/adalflow/adalflow/core/generator.py
index 662c35ac..f3618e80 100644
--- a/adalflow/adalflow/core/generator.py
+++ b/adalflow/adalflow/core/generator.py
@@ -59,7 +59,7 @@
log = logging.getLogger(__name__)
-DEBUG_MODE = os.environ.get("DEBUG_MODE", True)
+DEBUG_MODE = os.environ.get("DEBUG_MODE", False)
PromptArgType = Dict[str, Union[str, Parameter]]
@@ -659,7 +659,8 @@ def backward(
# backward score to the demo parameter
for pred in children_params:
# if pred.requires_opt:
- pred.set_score(response.score)
+ if response.score is not None:
+ pred.set_score(response.score)
log.debug(
f"backpropagate the score {response.score} to {pred.name}, is_teacher: {self.teacher_mode}"
)
@@ -877,7 +878,8 @@ def _backward_through_all_predecessors(
)
var_gradient.add_prompt(backward_engine_prompt_str)
pred.add_gradient(var_gradient)
- pred.set_score(response.score)
+ if response.score is not None:
+ pred.set_score(response.score)
@staticmethod
def _backward_through_one_predecessor(
@@ -1024,7 +1026,8 @@ def _backward_through_one_predecessor(
)
var_gradient.add_prompt(backward_engine_prompt_str)
pred.add_gradient(var_gradient)
- pred.set_score(response.score)
+ if response.score is not None:
+ pred.set_score(response.score)
def _run_callbacks(
self,
diff --git a/adalflow/adalflow/optim/grad_component.py b/adalflow/adalflow/optim/grad_component.py
index 312dac04..32e986f1 100644
--- a/adalflow/adalflow/optim/grad_component.py
+++ b/adalflow/adalflow/optim/grad_component.py
@@ -18,6 +18,7 @@
)
from adalflow.optim.types import ParameterType
from adalflow.core.types import GeneratorOutput
+from adalflow.utils import printc
import json
@@ -212,7 +213,8 @@ def backward(self, *, response: "Parameter", id: str = None, **kwargs):
pred.backward_engine_disabled = True
for _, pred in enumerate(children_params):
- pred.set_score(response.score)
+ if response.score is not None:
+ pred.set_score(response.score)
if pred.param_type == ParameterType.DEMOS:
pred.add_score_to_trace(
@@ -377,6 +379,8 @@ def _backward_through_one_predecessor(
metadata: Dict[str, str] = None,
):
if not pred.requires_opt:
+ if response.score is not None:
+ pred.set_score(response.score)
log.debug(
f"EvalFnToTextLoss: Skipping {pred} as it does not require optimization."
)
@@ -492,7 +496,8 @@ def _backward_through_one_predecessor(
# backward the end to end score
# TODO: not really useful
- pred.set_score(response.score)
+ if response.score is not None:
+ pred.set_score(response.score)
pred.set_gt(ground_truth)
print(f"pred: {pred.name}, gt: {ground_truth}")
# print(f"setting pred name {pred.name} score to {response.data}")
@@ -533,9 +538,11 @@ def backward(self, *, response: "OutputParameter", id: str = None, **kwargs):
else:
for _, pred in enumerate(children_params):
+ if response.score is not None:
+ pred.set_score(response.score)
+ printc(f"score score for pred name: {pred.name}")
if not pred.requires_opt:
continue
- pred.set_score(response.score)
if pred.param_type == ParameterType.DEMOS:
pred.add_score_to_trace(
diff --git a/adalflow/adalflow/optim/parameter.py b/adalflow/adalflow/optim/parameter.py
index 0cd48b62..617c00ad 100644
--- a/adalflow/adalflow/optim/parameter.py
+++ b/adalflow/adalflow/optim/parameter.py
@@ -615,7 +615,9 @@ def set_score(self, score: float):
But this score is only used to relay the score to the demo parameter.
"""
if not isinstance(score, float):
- raise ValueError(f"score is not float, but {type(score)}")
+ raise ValueError(
+ f"score is not float, but {type(score)}, parameter name: {self.name}"
+ )
self.score = score
def add_dataclass_to_trace(self, trace: DataClass, is_teacher: bool = True):
diff --git a/adalflow/adalflow/optim/text_grad/text_loss_with_eval_fn.py b/adalflow/adalflow/optim/text_grad/text_loss_with_eval_fn.py
index 25e6895f..6df51a9a 100644
--- a/adalflow/adalflow/optim/text_grad/text_loss_with_eval_fn.py
+++ b/adalflow/adalflow/optim/text_grad/text_loss_with_eval_fn.py
@@ -193,6 +193,8 @@ def _backward_through_one_predecessor(
metadata: Dict[str, str] = None,
):
if not pred.requires_opt:
+ if response.score is not None:
+ pred.set_score(response.score)
log.debug(
f"EvalFnToTextLoss: Skipping {pred} as it does not require optimization."
)
@@ -308,7 +310,8 @@ def _backward_through_one_predecessor(
# backward the end to end score
# TODO: not really useful
- pred.set_score(response.data)
+ if response.score is not None:
+ pred.set_score(response.score)
pred.set_gt(ground_truth)
print(f"pred: {pred.name}, gt: {ground_truth}")
# print(f"setting pred name {pred.name} score to {response.data}")
diff --git a/benchmarks/hotpot_qa/adal_exp/build_multi_hop_rag.py b/benchmarks/hotpot_qa/adal_exp/build_multi_hop_rag.py
index f2a07a09..81b75d1f 100644
--- a/benchmarks/hotpot_qa/adal_exp/build_multi_hop_rag.py
+++ b/benchmarks/hotpot_qa/adal_exp/build_multi_hop_rag.py
@@ -337,7 +337,7 @@ def __init__(self, model_client, model_kwargs, passages_per_hop=3, max_hops=2):
data=task_desc_str,
# data=trained_task_desc_strs[i],
role_desc=f"Task description for {i+1}th LLM as a query generator",
- requires_opt=True,
+ requires_opt=False,
param_type=ParameterType.PROMPT,
),
"output_format_str": self.data_parser.get_output_format_str(),