Commit: update

lukasturcani committed Aug 20, 2024
1 parent ecf8e67 commit a0ee1ba
Showing 10 changed files with 801 additions and 557 deletions.
86 changes: 57 additions & 29 deletions docs/notebooks/neuralnet/auto-thermal-reformer-relu.ipynb
@@ -78,7 +78,8 @@
],
"source": [
"import os\n",
"os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # suppress CUDA warnings from tensorflow\n",
"\n",
"os.environ[\"TF_CPP_MIN_LOG_LEVEL\"] = \"2\" # suppress CUDA warnings from tensorflow\n",
"\n",
"# import the necessary packages\n",
"from omlt import OmltBlock, OffsetScaling\n",
@@ -151,10 +152,23 @@
],
"source": [
"# read in our csv data\n",
"columns = ['Bypass Fraction', 'NG Steam Ratio', 'Steam Flow',\n",
" 'Reformer Duty','AR', 'C2H6', 'C3H8', 'C4H10',\n",
" 'CH4', 'CO', 'CO2', 'H2', 'H2O', 'N2']\n",
"df = pd.read_csv('../data/reformer.csv', usecols=columns)\n",
"columns = [\n",
" \"Bypass Fraction\",\n",
" \"NG Steam Ratio\",\n",
" \"Steam Flow\",\n",
" \"Reformer Duty\",\n",
" \"AR\",\n",
" \"C2H6\",\n",
" \"C3H8\",\n",
" \"C4H10\",\n",
" \"CH4\",\n",
" \"CO\",\n",
" \"CO2\",\n",
" \"H2\",\n",
" \"H2O\",\n",
" \"N2\",\n",
"]\n",
"df = pd.read_csv(\"../data/reformer.csv\", usecols=columns)\n",
"print(df)"
]
},
@@ -169,9 +183,21 @@
"outputs": [],
"source": [
"# separate the data into inputs and outputs\n",
"inputs = ['Bypass Fraction', 'NG Steam Ratio']\n",
"outputs = [ 'Steam Flow', 'Reformer Duty','AR', 'C2H6', 'C3H8', 'C4H10',\n",
" 'CH4', 'CO', 'CO2', 'H2', 'H2O', 'N2']\n",
"inputs = [\"Bypass Fraction\", \"NG Steam Ratio\"]\n",
"outputs = [\n",
" \"Steam Flow\",\n",
" \"Reformer Duty\",\n",
" \"AR\",\n",
" \"C2H6\",\n",
" \"C3H8\",\n",
" \"C4H10\",\n",
" \"CH4\",\n",
" \"CO\",\n",
" \"CO2\",\n",
" \"H2\",\n",
" \"H2O\",\n",
" \"N2\",\n",
"]\n",
"dfin = df[inputs]\n",
"dfout = df[outputs]"
]
@@ -222,13 +248,13 @@
],
"source": [
"# create our Keras Sequential model\n",
"nn = Sequential(name='reformer_relu_4_20')\n",
"nn.add(Dense(units=10, input_dim=len(inputs), activation='relu'))\n",
"nn.add(Dense(units=10, activation='relu'))\n",
"nn.add(Dense(units=10, activation='relu'))\n",
"nn.add(Dense(units=10, activation='relu'))\n",
"nn = Sequential(name=\"reformer_relu_4_20\")\n",
"nn.add(Dense(units=10, input_dim=len(inputs), activation=\"relu\"))\n",
"nn.add(Dense(units=10, activation=\"relu\"))\n",
"nn.add(Dense(units=10, activation=\"relu\"))\n",
"nn.add(Dense(units=10, activation=\"relu\"))\n",
"nn.add(Dense(units=len(outputs)))\n",
"nn.compile(optimizer=Adam(), loss='mse')"
"nn.compile(optimizer=Adam(), loss=\"mse\")"
]
},
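
Note: input_dim is the legacy way of fixing the input shape in Keras; current Keras versions prefer an explicit Input layer. A hedged equivalent of the model above in that style (assuming the tensorflow.keras import path; a sketch, not what the commit uses):

    import tensorflow as tf

    nn = tf.keras.Sequential(name="reformer_relu_4_20")
    nn.add(tf.keras.Input(shape=(len(inputs),)))
    for _ in range(4):
        nn.add(tf.keras.layers.Dense(units=10, activation="relu"))  # four hidden layers
    nn.add(tf.keras.layers.Dense(units=len(outputs)))
    nn.compile(optimizer=tf.keras.optimizers.Adam(), loss="mse")
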
{
@@ -468,7 +494,7 @@
"# save the model to disk\n",
"# While not technically necessary, this shows how we can load a previously saved model into\n",
"# our optimization formulation)\n",
"nn.save('reformer_nn_relu.keras')"
"nn.save(\"reformer_nn_relu.keras\")"
]
},
{
@@ -522,22 +548,24 @@
"outputs": [],
"source": [
"# load the Keras model\n",
"nn_reformer = keras.models.load_model('reformer_nn_relu.keras', compile=False)\n",
"nn_reformer = keras.models.load_model(\"reformer_nn_relu.keras\", compile=False)\n",
"\n",
"# Note: The neural network is in the scaled space. We want access to the\n",
"# variables in the unscaled space. Therefore, we need to tell OMLT about the\n",
"# scaling factors\n",
"scaler = OffsetScaling(\n",
" offset_inputs={i: x_offset[inputs[i]] for i in range(len(inputs))},\n",
" factor_inputs={i: x_factor[inputs[i]] for i in range(len(inputs))},\n",
" offset_outputs={i: y_offset[outputs[i]] for i in range(len(outputs))},\n",
" factor_outputs={i: y_factor[outputs[i]] for i in range(len(outputs))}\n",
" )\n",
" offset_inputs={i: x_offset[inputs[i]] for i in range(len(inputs))},\n",
" factor_inputs={i: x_factor[inputs[i]] for i in range(len(inputs))},\n",
" offset_outputs={i: y_offset[outputs[i]] for i in range(len(outputs))},\n",
" factor_outputs={i: y_factor[outputs[i]] for i in range(len(outputs))},\n",
")\n",
"\n",
"scaled_input_bounds = {i: (scaled_lb[i], scaled_ub[i]) for i in range(len(inputs))}\n",
"\n",
"# create a network definition from the Keras model\n",
"net = load_keras_sequential(nn_reformer, scaling_object=scaler, scaled_input_bounds=scaled_input_bounds)\n",
"net = load_keras_sequential(\n",
" nn_reformer, scaling_object=scaler, scaled_input_bounds=scaled_input_bounds\n",
")\n",
"\n",
"# create the variables and constraints for the neural network in Pyomo\n",
"m.reformer.build_formulation(ReluBigMFormulation(net))"
@@ -554,8 +582,8 @@
"outputs": [],
"source": [
"# now add the objective and the constraints\n",
"h2_idx = outputs.index('H2')\n",
"n2_idx = outputs.index('N2')\n",
"h2_idx = outputs.index(\"H2\")\n",
"n2_idx = outputs.index(\"N2\")\n",
"m.obj = pyo.Objective(expr=m.reformer.outputs[h2_idx], sense=pyo.maximize)\n",
"m.con = pyo.Constraint(expr=m.reformer.outputs[n2_idx] <= 0.34)"
]
@@ -571,7 +599,7 @@
"outputs": [],
"source": [
"# now solve the optimization problem (this may take some time)\n",
"solver = pyo.SolverFactory('cbc')\n",
"solver = pyo.SolverFactory(\"cbc\")\n",
"status = solver.solve(m, tee=False)"
]
},
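
Note: ReluBigMFormulation encodes every ReLU with a binary activation variable plus big-M constraints, so the resulting problem is a mixed-integer linear program and needs a MILP solver such as CBC. A hedged sketch of guarding against a missing solver binary (the glpk fallback is an assumption, not part of the notebook):

    solver = pyo.SolverFactory("cbc")
    if not solver.available(exception_flag=False):
        solver = pyo.SolverFactory("glpk")  # any Pyomo-supported MILP solver works
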
@@ -596,10 +624,10 @@
}
],
"source": [
"print('Bypass Fraction:', pyo.value(m.reformer.inputs[0]))\n",
"print('NG Steam Ratio:', pyo.value(m.reformer.inputs[1]))\n",
"print('H2 Concentration:', pyo.value(m.reformer.outputs[h2_idx]))\n",
"print('N2 Concentration:', pyo.value(m.reformer.outputs[n2_idx]))"
"print(\"Bypass Fraction:\", pyo.value(m.reformer.inputs[0]))\n",
"print(\"NG Steam Ratio:\", pyo.value(m.reformer.inputs[1]))\n",
"print(\"H2 Concentration:\", pyo.value(m.reformer.outputs[h2_idx]))\n",
"print(\"N2 Concentration:\", pyo.value(m.reformer.outputs[n2_idx]))"
]
}
],
86 changes: 57 additions & 29 deletions docs/notebooks/neuralnet/auto-thermal-reformer.ipynb
@@ -67,7 +67,8 @@
"outputs": [],
"source": [
"import os\n",
"os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # suppress CUDA warnings from tensorflow\n",
"\n",
"os.environ[\"TF_CPP_MIN_LOG_LEVEL\"] = \"2\" # suppress CUDA warnings from tensorflow\n",
"\n",
"# import the necessary packages\n",
"from omlt import OmltBlock, OffsetScaling\n",
@@ -140,10 +141,23 @@
],
"source": [
"# read in our csv data\n",
"columns = ['Bypass Fraction', 'NG Steam Ratio', 'Steam Flow',\n",
" 'Reformer Duty','AR', 'C2H6', 'C3H8', 'C4H10',\n",
" 'CH4', 'CO', 'CO2', 'H2', 'H2O', 'N2']\n",
"df = pd.read_csv('../data/reformer.csv', usecols=columns)\n",
"columns = [\n",
" \"Bypass Fraction\",\n",
" \"NG Steam Ratio\",\n",
" \"Steam Flow\",\n",
" \"Reformer Duty\",\n",
" \"AR\",\n",
" \"C2H6\",\n",
" \"C3H8\",\n",
" \"C4H10\",\n",
" \"CH4\",\n",
" \"CO\",\n",
" \"CO2\",\n",
" \"H2\",\n",
" \"H2O\",\n",
" \"N2\",\n",
"]\n",
"df = pd.read_csv(\"../data/reformer.csv\", usecols=columns)\n",
"print(df)"
]
},
@@ -158,9 +172,21 @@
"outputs": [],
"source": [
"# separate the data into inputs and outputs\n",
"inputs = ['Bypass Fraction', 'NG Steam Ratio']\n",
"outputs = [ 'Steam Flow', 'Reformer Duty','AR', 'C2H6', 'C3H8', 'C4H10',\n",
" 'CH4', 'CO', 'CO2', 'H2', 'H2O', 'N2']\n",
"inputs = [\"Bypass Fraction\", \"NG Steam Ratio\"]\n",
"outputs = [\n",
" \"Steam Flow\",\n",
" \"Reformer Duty\",\n",
" \"AR\",\n",
" \"C2H6\",\n",
" \"C3H8\",\n",
" \"C4H10\",\n",
" \"CH4\",\n",
" \"CO\",\n",
" \"CO2\",\n",
" \"H2\",\n",
" \"H2O\",\n",
" \"N2\",\n",
"]\n",
"dfin = df[inputs]\n",
"dfout = df[outputs]"
]
@@ -211,13 +237,13 @@
],
"source": [
"# create our Keras Sequential model\n",
"nn = Sequential(name='reformer_sigmoid_4_20')\n",
"nn.add(Dense(units=20, input_dim=len(inputs), activation='sigmoid'))\n",
"nn.add(Dense(units=20, activation='sigmoid'))\n",
"nn.add(Dense(units=20, activation='sigmoid'))\n",
"nn.add(Dense(units=20, activation='sigmoid'))\n",
"nn = Sequential(name=\"reformer_sigmoid_4_20\")\n",
"nn.add(Dense(units=20, input_dim=len(inputs), activation=\"sigmoid\"))\n",
"nn.add(Dense(units=20, activation=\"sigmoid\"))\n",
"nn.add(Dense(units=20, activation=\"sigmoid\"))\n",
"nn.add(Dense(units=20, activation=\"sigmoid\"))\n",
"nn.add(Dense(units=len(outputs)))\n",
"nn.compile(optimizer=Adam(), loss='mse')"
"nn.compile(optimizer=Adam(), loss=\"mse\")"
]
},
{
@@ -457,7 +483,7 @@
"# save the model to disk\n",
"# While not technically necessary, this shows how we can load a previously saved model into\n",
"# our optimization formulation)\n",
"nn.save('reformer_nn.keras')"
"nn.save(\"reformer_nn.keras\")"
]
},
{
@@ -511,22 +537,24 @@
"outputs": [],
"source": [
"# load the Keras model\n",
"nn_reformer = keras.models.load_model('reformer_nn.keras', compile=False)\n",
"nn_reformer = keras.models.load_model(\"reformer_nn.keras\", compile=False)\n",
"\n",
"# Note: The neural network is in the scaled space. We want access to the\n",
"# variables in the unscaled space. Therefore, we need to tell OMLT about the\n",
"# scaling factors\n",
"scaler = OffsetScaling(\n",
" offset_inputs={i: x_offset[inputs[i]] for i in range(len(inputs))},\n",
" factor_inputs={i: x_factor[inputs[i]] for i in range(len(inputs))},\n",
" offset_outputs={i: y_offset[outputs[i]] for i in range(len(outputs))},\n",
" factor_outputs={i: y_factor[outputs[i]] for i in range(len(outputs))}\n",
" )\n",
" offset_inputs={i: x_offset[inputs[i]] for i in range(len(inputs))},\n",
" factor_inputs={i: x_factor[inputs[i]] for i in range(len(inputs))},\n",
" offset_outputs={i: y_offset[outputs[i]] for i in range(len(outputs))},\n",
" factor_outputs={i: y_factor[outputs[i]] for i in range(len(outputs))},\n",
")\n",
"\n",
"scaled_input_bounds = {i: (scaled_lb[i], scaled_ub[i]) for i in range(len(inputs))}\n",
"\n",
"# create a network definition from the Keras model\n",
"net = load_keras_sequential(nn_reformer, scaling_object=scaler, scaled_input_bounds=scaled_input_bounds)\n",
"net = load_keras_sequential(\n",
" nn_reformer, scaling_object=scaler, scaled_input_bounds=scaled_input_bounds\n",
")\n",
"\n",
"# create the variables and constraints for the neural network in Pyomo\n",
"m.reformer.build_formulation(FullSpaceSmoothNNFormulation(net))"
@@ -543,8 +571,8 @@
"outputs": [],
"source": [
"# now add the objective and the constraints\n",
"h2_idx = outputs.index('H2')\n",
"n2_idx = outputs.index('N2')\n",
"h2_idx = outputs.index(\"H2\")\n",
"n2_idx = outputs.index(\"N2\")\n",
"m.obj = pyo.Objective(expr=m.reformer.outputs[h2_idx], sense=pyo.maximize)\n",
"m.con = pyo.Constraint(expr=m.reformer.outputs[n2_idx] <= 0.34)"
]
@@ -687,7 +715,7 @@
],
"source": [
"# now solve the optimization problem\n",
"solver = pyo.SolverFactory('ipopt')\n",
"solver = pyo.SolverFactory(\"ipopt\")\n",
"status = solver.solve(m, tee=True)"
]
},
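
Note: unlike the ReLU notebook above, sigmoid activations are smooth, so FullSpaceSmoothNNFormulation produces a continuous NLP and the interior-point solver Ipopt is the natural choice; a MILP solver like CBC could not handle the nonlinear constraints. Checking the termination condition before reading results is a prudent extra step (not part of the notebook):

    from pyomo.opt import TerminationCondition

    assert status.solver.termination_condition == TerminationCondition.optimal
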
@@ -712,10 +740,10 @@
}
],
"source": [
"print('Bypass Fraction:', pyo.value(m.reformer.inputs[0]))\n",
"print('NG Steam Ratio:', pyo.value(m.reformer.inputs[1]))\n",
"print('H2 Concentration:', pyo.value(m.reformer.outputs[h2_idx]))\n",
"print('N2 Concentration:', pyo.value(m.reformer.outputs[n2_idx]))"
"print(\"Bypass Fraction:\", pyo.value(m.reformer.inputs[0]))\n",
"print(\"NG Steam Ratio:\", pyo.value(m.reformer.inputs[1]))\n",
"print(\"H2 Concentration:\", pyo.value(m.reformer.outputs[h2_idx]))\n",
"print(\"N2 Concentration:\", pyo.value(m.reformer.outputs[n2_idx]))"
]
}
],