train_flow.py
import argparse
import mlflow
import torch
from torch.optim import *  # noqa: F403 -- optimizer classes are selected by name at runtime (see below)
from configs.parser import YAMLParser
from dataloader.h5 import H5Loader
from loss.flow import EventWarping
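# analog (non-spiking) architectures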
from models.model import (
FireNet,
RNNFireNet,
LeakyFireNet,
FireFlowNet,
LeakyFireFlowNet,
E2VID,
EVFlowNet,
RecEVFlowNet,
LeakyRecEVFlowNet,
RNNRecEVFlowNet,
)
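# spiking (LIF-family) variants of the architectures above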
from models.model import (
LIFFireNet,
PLIFFireNet,
ALIFFireNet,
XLIFFireNet,
LIFFireFlowNet,
SpikingRecEVFlowNet,
PLIFRecEVFlowNet,
ALIFRecEVFlowNet,
XLIFRecEVFlowNet,
)
from utils.gradients import get_grads
from utils.utils import load_model, save_csv, save_diff, save_model
from utils.visualization import Visualization


def train(args, config_parser):
mlflow.set_tracking_uri(args.path_mlflow)

    # configs
config = config_parser.config
    if config["data"]["mode"] == "frames":
        raise AttributeError("Config error: training pipeline not compatible with frames mode.")

    # log config
mlflow.set_experiment(config["experiment"])
mlflow.start_run()
mlflow.log_params(config)
mlflow.log_param("prev_runid", args.prev_runid)
config = config_parser.combine_entries(config)
print("MLflow dir:", mlflow.active_run().info.artifact_uri[:-9])

    # log git diff
save_diff("train_diff.txt")

    # initialize settings
device = config_parser.device
kwargs = config_parser.loader_kwargs

    # visualization tool
if config["vis"]["enabled"]:
vis = Visualization(config)

    # data loader
data = H5Loader(config, config["model"]["num_bins"], config["model"]["round_encoding"])
dataloader = torch.utils.data.DataLoader(
data,
drop_last=True,
batch_size=config["loader"]["batch_size"],
collate_fn=data.custom_collate,
worker_init_fn=config_parser.worker_init_fn,
**kwargs,
)

    # loss function
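    # EventWarping warps events with the predicted flow and penalizes the residual
    # motion blur in the warped event image (self-supervised; see loss/flow.py)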
loss_function = EventWarping(config, device)

    # model initialization and settings
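    # the model class is resolved by name via eval() from the classes imported above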
model = eval(config["model"]["name"])(config["model"].copy()).to(device)
model = load_model(args.prev_runid, model, device)
model.train()

    # optimizers
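    # likewise resolved by name from torch.optim (e.g. "Adam"), hence the wildcard import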
optimizer = eval(config["optimizer"]["name"])(model.parameters(), lr=config["optimizer"]["lr"])
optimizer.zero_grad()

    # simulation variables
train_loss = 0
best_loss = 1.0e6
end_train = False
grads_w = []

    # training loop
data.shuffle()
while True:
for inputs in dataloader:
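            # a new sequence begins: clear recurrent states, loss buffers, and stale gradients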
if data.new_seq:
data.new_seq = False
loss_function.reset()
model.reset_states()
optimizer.zero_grad()
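
            # end of an epoch: log the average loss and checkpoint the model if it improved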
if data.seq_num >= len(data.files):
                avg_loss = train_loss / (data.samples + 1)  # +1 guards against division by zero
                mlflow.log_metric("loss", avg_loss, step=data.epoch)
                with torch.no_grad():
                    if avg_loss < best_loss:
                        save_model(model)
                        best_loss = avg_loss
data.epoch += 1
data.samples = 0
train_loss = 0
data.seq_num = data.seq_num % len(data.files)
# save grads to file
if config["vis"]["store_grads"]:
save_csv(grads_w, "grads_w.csv")
grads_w = []
# finish training loop
if data.epoch == config["loader"]["n_epochs"]:
end_train = True
# forward pass
x = model(inputs["event_voxel"].to(device), inputs["event_cnt"].to(device))
# event flow association
loss_function.event_flow_association(
x["flow"],
inputs["event_list"].to(device),
inputs["event_list_pol_mask"].to(device),
inputs["event_mask"].to(device),
)
# backward pass
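            # the update is deferred until the loss window has accumulated enough events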
if loss_function.num_events >= config["data"]["window_loss"]:
# overwrite intermediate flow estimates with the final ones
if config["loss"]["overwrite_intermediate"]:
loss_function.overwrite_intermediate_flow(x["flow"])
# loss
loss = loss_function()
train_loss += loss.item()
                # update the number of samples that have contributed to the loss
data.samples += config["loader"]["batch_size"]
loss.backward()
# clip and save grads
if config["loss"]["clip_grad"] is not None:
torch.nn.utils.clip_grad.clip_grad_norm_(model.parameters(), config["loss"]["clip_grad"])
if config["vis"]["store_grads"]:
grads_w.append(get_grads(model.named_parameters()))
optimizer.step()
optimizer.zero_grad()
# mask flow for visualization
flow_vis = x["flow"][-1].clone()
if model.mask and config["vis"]["enabled"] and config["loader"]["batch_size"] == 1:
flow_vis *= loss_function.event_mask
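                # truncate backprop-through-time: detach recurrent states between loss windows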
model.detach_states()
loss_function.reset()
# visualize
with torch.no_grad():
if config["vis"]["enabled"] and config["loader"]["batch_size"] == 1:
vis.update(inputs, flow_vis, None)
# print training info
if config["vis"]["verbose"]:
print(
"Train Epoch: {:04d} [{:03d}/{:03d} ({:03d}%)] Loss: {:.6f}".format(
data.epoch,
data.seq_num,
len(data.files),
int(100 * data.seq_num / len(data.files)),
train_loss / (data.samples + 1),
),
end="\r",
)
if end_train:
break

    mlflow.end_run()


if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--config",
default="configs/train_flow.yml",
help="training configuration",
)
parser.add_argument(
"--path_mlflow",
default="",
help="location of the mlflow ui",
)
parser.add_argument(
"--prev_runid",
default="",
help="pre-trained model to use as starting point",
)
args = parser.parse_args()
# launch training
train(args, YAMLParser(args.config))
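

# Example invocation (paths and run id are illustrative):
#   python train_flow.py --config configs/train_flow.yml --path_mlflow /path/to/mlruns --prev_runid <runid>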