Skip to content

Commit

Permalink
fix the bug in set pred score
Browse files Browse the repository at this point in the history
  • Loading branch information
liyin2015 committed Jan 8, 2025
1 parent e204688 commit bdd0418
Show file tree
Hide file tree
Showing 5 changed files with 25 additions and 10 deletions.
11 changes: 7 additions & 4 deletions adalflow/adalflow/core/generator.py
Original file line number Diff line number Diff line change
Expand Up @@ -59,7 +59,7 @@

log = logging.getLogger(__name__)

DEBUG_MODE = os.environ.get("DEBUG_MODE", True)
DEBUG_MODE = os.environ.get("DEBUG_MODE", False)

PromptArgType = Dict[str, Union[str, Parameter]]

Expand Down Expand Up @@ -659,7 +659,8 @@ def backward(
# backward score to the demo parameter
for pred in children_params:
# if pred.requires_opt:
pred.set_score(response.score)
if response.score is not None:
pred.set_score(response.score)
log.debug(
f"backpropagate the score {response.score} to {pred.name}, is_teacher: {self.teacher_mode}"
)
Expand Down Expand Up @@ -877,7 +878,8 @@ def _backward_through_all_predecessors(
)
var_gradient.add_prompt(backward_engine_prompt_str)
pred.add_gradient(var_gradient)
pred.set_score(response.score)
if response.score is not None:
pred.set_score(response.score)

@staticmethod
def _backward_through_one_predecessor(
Expand Down Expand Up @@ -1024,7 +1026,8 @@ def _backward_through_one_predecessor(
)
var_gradient.add_prompt(backward_engine_prompt_str)
pred.add_gradient(var_gradient)
pred.set_score(response.score)
if response.score is not None:
pred.set_score(response.score)

def _run_callbacks(
self,
Expand Down
13 changes: 10 additions & 3 deletions adalflow/adalflow/optim/grad_component.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,7 @@
)
from adalflow.optim.types import ParameterType
from adalflow.core.types import GeneratorOutput
from adalflow.utils import printc

import json

Expand Down Expand Up @@ -212,7 +213,8 @@ def backward(self, *, response: "Parameter", id: str = None, **kwargs):
pred.backward_engine_disabled = True

for _, pred in enumerate(children_params):
pred.set_score(response.score)
if response.score is not None:
pred.set_score(response.score)

if pred.param_type == ParameterType.DEMOS:
pred.add_score_to_trace(
Expand Down Expand Up @@ -377,6 +379,8 @@ def _backward_through_one_predecessor(
metadata: Dict[str, str] = None,
):
if not pred.requires_opt:
if response.score is not None:
pred.set_score(response.score)
log.debug(
f"EvalFnToTextLoss: Skipping {pred} as it does not require optimization."
)
Expand Down Expand Up @@ -492,7 +496,8 @@ def _backward_through_one_predecessor(

# backward the end to end score
# TODO: not really useful
pred.set_score(response.score)
if response.score is not None:
pred.set_score(response.score)
pred.set_gt(ground_truth)
print(f"pred: {pred.name}, gt: {ground_truth}")
# print(f"setting pred name {pred.name} score to {response.data}")
Expand Down Expand Up @@ -533,9 +538,11 @@ def backward(self, *, response: "OutputParameter", id: str = None, **kwargs):
else:

for _, pred in enumerate(children_params):
if response.score is not None:
pred.set_score(response.score)
printc(f"score score for pred name: {pred.name}")
if not pred.requires_opt:
continue
pred.set_score(response.score)

if pred.param_type == ParameterType.DEMOS:
pred.add_score_to_trace(
Expand Down
4 changes: 3 additions & 1 deletion adalflow/adalflow/optim/parameter.py
Original file line number Diff line number Diff line change
Expand Up @@ -615,7 +615,9 @@ def set_score(self, score: float):
But this score is only used to relay the score to the demo parameter.
"""
if not isinstance(score, float):
raise ValueError(f"score is not float, but {type(score)}")
raise ValueError(
f"score is not float, but {type(score)}, parameter name: {self.name}"
)
self.score = score

def add_dataclass_to_trace(self, trace: DataClass, is_teacher: bool = True):
Expand Down
5 changes: 4 additions & 1 deletion adalflow/adalflow/optim/text_grad/text_loss_with_eval_fn.py
Original file line number Diff line number Diff line change
Expand Up @@ -193,6 +193,8 @@ def _backward_through_one_predecessor(
metadata: Dict[str, str] = None,
):
if not pred.requires_opt:
if response.score is not None:
pred.set_score(response.score)
log.debug(
f"EvalFnToTextLoss: Skipping {pred} as it does not require optimization."
)
Expand Down Expand Up @@ -308,7 +310,8 @@ def _backward_through_one_predecessor(

# backward the end to end score
# TODO: not really useful
pred.set_score(response.data)
if response.score is not None:
pred.set_score(response.score)
pred.set_gt(ground_truth)
print(f"pred: {pred.name}, gt: {ground_truth}")
# print(f"setting pred name {pred.name} score to {response.data}")
Expand Down
2 changes: 1 addition & 1 deletion benchmarks/hotpot_qa/adal_exp/build_multi_hop_rag.py
Original file line number Diff line number Diff line change
Expand Up @@ -337,7 +337,7 @@ def __init__(self, model_client, model_kwargs, passages_per_hop=3, max_hops=2):
data=task_desc_str,
# data=trained_task_desc_strs[i],
role_desc=f"Task description for {i+1}th LLM as a query generator",
requires_opt=True,
requires_opt=False,
param_type=ParameterType.PROMPT,
),
"output_format_str": self.data_parser.get_output_format_str(),
Expand Down

0 comments on commit bdd0418

Please sign in to comment.