From b499c78e91bd79ac61bf3f279c193cf7624390d7 Mon Sep 17 00:00:00 2001 From: Hongkai Dai Date: Tue, 5 Mar 2024 21:51:32 +0800 Subject: [PATCH] Solve an optimization problem with backoff. --- compatible_clf_cbf/utils.py | 35 +++++++++++++++++++++++++++++ tests/test_utils.py | 45 +++++++++++++++++++++++++++++++++++++ 2 files changed, 80 insertions(+) diff --git a/compatible_clf_cbf/utils.py b/compatible_clf_cbf/utils.py index cd99ae4..3dc15a3 100644 --- a/compatible_clf_cbf/utils.py +++ b/compatible_clf_cbf/utils.py @@ -230,12 +230,47 @@ def solve_with_id( prog: solvers.MathematicalProgram, solver_id: Optional[solvers.SolverId] = None, solver_options: Optional[solvers.SolverOptions] = None, + backoff_scale: Optional[float] = None, ) -> solvers.MathematicalProgramResult: + """ + Args: + backoff_scale: when solving an optimization problem with an objective function, + we first solve the problem to optimality, and then "back off" a little bit to find + a sub-optimal but strictly feasible solution. backoff_scale=0 corresponds to no + backoff. Note that during backing off, we will modify the original `prog`. + """ if solver_id is None: result = solvers.Solve(prog, None, solver_options) else: solver = solvers.MakeSolver(solver_id) result = solver.Solve(prog, None, solver_options) + if ( + len(prog.linear_costs()) > 0 or len(prog.quadratic_costs()) > 0 + ) and backoff_scale is not None: + assert ( + len(prog.linear_costs()) == 1 + ), "TODO(hongkai.dai): support program with multiple LinearCost objects." + assert ( + len(prog.quadratic_costs()) == 0 + ), "TODO(hongkai.dai): we currently only support program with linear costs." + assert backoff_scale >= 0, "backoff_scale should be non-negative." 
+        optimal_cost = result.get_optimal_cost()
+        coeff_cost = prog.linear_costs()[0].evaluator().a()
+        var_cost = prog.linear_costs()[0].variables()
+        constant_cost = prog.linear_costs()[0].evaluator().b()
+        prog.RemoveCost(prog.linear_costs()[0])
+        cost_upper_bound = (
+            optimal_cost * (1 + backoff_scale)
+            if optimal_cost > 0
+            else optimal_cost * (1 - backoff_scale)
+        )
+        prog.AddLinearConstraint(
+            coeff_cost, -np.inf, cost_upper_bound - constant_cost, var_cost
+        )
+        if solver_id is None:
+            result = solvers.Solve(prog, None, solver_options)
+        else:
+            result = solver.Solve(prog, None, solver_options)
     return result
 
 
diff --git a/tests/test_utils.py b/tests/test_utils.py
index e340159..c55cccd 100644
--- a/tests/test_utils.py
+++ b/tests/test_utils.py
@@ -113,3 +113,48 @@ def test_add_constraint3(self):
         assert lagrangians_result.inner_ineq[1].TotalDegree() == 0
         assert lagrangians_result.inner_eq[0].TotalDegree() == 1
         assert lagrangians_result.outer.TotalDegree() == 0
+
+
+def test_solve_w_id():
+    prog = solvers.MathematicalProgram()
+    x = prog.NewContinuousVariables(2)
+    prog.AddBoundingBoxConstraint(-1, 1, x)
+    prog.AddLinearCost(x[0] + x[1] + 1)
+    result = mut.solve_with_id(
+        prog, solver_id=None, solver_options=None, backoff_scale=0.1
+    )
+    assert result.is_success()
+    # I know the optimal solution is obtained at (-1, -1), with the optimal cost being
+    # -1. Hence by backing off, the solution should satisfy x[0] + x[1] + 1 <= -0.9
+    x_sol = result.GetSolution(x)
+    assert x_sol[0] + x_sol[1] + 1 <= -0.9 + 1E-5
+    # Now add the objective max x[0] + x[1]. The maximization should be
+    # x[0] + x[1] = -1.9
+    prog.AddLinearCost(-x[0] - x[1])
+    result = mut.solve_with_id(
+        prog, solver_id=None, solver_options=None, backoff_scale=None
+    )
+    x_sol = result.GetSolution(x)
+    np.testing.assert_allclose(x_sol[0] + x_sol[1], -1.9, atol=1e-5)
+
+    # Now test the problem with a positive optimal cost.
+    prog = solvers.MathematicalProgram()
+    x = prog.NewContinuousVariables(2)
+    prog.AddBoundingBoxConstraint(-1, 1, x)
+    prog.AddLinearCost(x[0] + x[1] + 3)
+    result = mut.solve_with_id(
+        prog, solver_id=None, solver_options=None, backoff_scale=0.1
+    )
+    assert result.is_success()
+    # I know the optimal solution is obtained at (-1, -1), with the optimal cost being
+    # 1. Hence by backing off, the solution should satisfy x[0] + x[1] + 3 <= 1.1
+    x_sol = result.GetSolution(x)
+    assert x_sol[0] + x_sol[1] + 3 <= 1.1 + 1E-5
+    # Now add the objective max x[0] + x[1]. The maximization should be
+    # x[0] + x[1] = -1.9
+    prog.AddLinearCost(-x[0] - x[1])
+    result = mut.solve_with_id(
+        prog, solver_id=None, solver_options=None, backoff_scale=None
+    )
+    x_sol = result.GetSolution(x)
+    np.testing.assert_allclose(x_sol[0] + x_sol[1], -1.9, atol=1e-5)