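# train.py -- training driver for an image-captioning model (an AoANet-style
# codebase). The schedule implemented below: cross-entropy training with
# scheduled sampling, optionally followed by self-critical (SCST) fine-tuning
# once opt.self_critical_after epochs have elapsed. Command-line flags are
# defined in opts.py.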
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import math
import time
import os
from six.moves import cPickle
import traceback
import pdb
import opts
import models
from dataloader import *
import skimage.io
import eval_utils
import misc.utils as utils
from misc.rewards import init_scorer, get_self_critical_reward
from misc.loss_wrapper import LossWrapper
from tqdm import tqdm
try:
    import tensorboardX as tb
except ImportError:
    print("tensorboardX is not installed")
    tb = None
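# Module-level switches: write_summary toggles TensorBoard logging and eval_
# enables the periodic validation pass inside the training loop.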
write_summary = False
eval_ = True
def add_summary_value(writer, key, value, iteration):
    if writer:
        writer.add_scalar(key, value, iteration)
def add_summary_values(writer, keys, values, iteration):
    if writer:
        for key, value in zip(keys, values):
            writer.add_scalar(key, value, iteration)
def train(opt):
    print("=================Training Information==============")
    print("start from {}".format(opt.start_from))
    print("box from {}".format(opt.input_box_dir))
    print("input json {}".format(opt.input_json))
    print("attributes from {}".format(opt.input_att_dir))
    print("features from {}".format(opt.input_fc_dir))
    print("batch size = {}".format(opt.batch_size))
    print("#GPU = {}".format(torch.cuda.device_count()))
    # Deal with feature things before anything
    opt.use_fc, opt.use_att = utils.if_use_feat(opt.caption_model)
    if opt.use_box:
        opt.att_feat_size = opt.att_feat_size + 5
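    # acc_steps enables gradient accumulation: gradients from acc_steps
    # consecutive mini-batches are summed before one optimizer step, emulating
    # an effective batch size of batch_size * acc_steps.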
    acc_steps = getattr(opt, 'acc_steps', 1)
    name_append = opt.name_append
    if len(name_append) > 0 and name_append[0] != '-':
        name_append = '_' + name_append

    loader = DataLoader(opt)
    opt.vocab_size = loader.vocab_size
    opt.seq_length = loader.seq_length
    opt.write_summary = write_summary
    if opt.write_summary:
        print("write summary to {}".format(opt.checkpoint_path))
    tb_summary_writer = tb and tb.SummaryWriter(opt.checkpoint_path)
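    # infos holds everything needed to resume training (iteration, epoch,
    # data-loader state, vocab, opts); histories holds the per-iteration
    # logging curves (loss, learning rate, scheduled-sampling prob, val results).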
    infos = {}
    histories = {}
    if opt.start_from is not None:
        # open old infos and check whether the models are compatible
        infos_path = os.path.join(opt.start_from, 'infos' + name_append + '.pkl')
        print("Load model information {}".format(infos_path))
        with open(infos_path, 'rb') as f:
            infos = utils.pickle_load(f)
            saved_model_opt = infos['opt']
            need_be_same = ["caption_model", "rnn_type", "rnn_size", "num_layers"]
            for checkme in need_be_same:
                assert vars(saved_model_opt)[checkme] == vars(opt)[checkme], \
                    "Command line argument and saved model disagree on '%s'" % checkme
        histories_path = os.path.join(opt.start_from, 'histories_' + name_append + '.pkl')
        if os.path.isfile(histories_path):
            with open(histories_path, 'rb') as f:
                histories = utils.pickle_load(f)
    else:  # start from scratch
        print("Initialize training process from the very beginning")
        infos['iter'] = 0
        infos['epoch'] = 0
        infos['iterators'] = loader.iterators
        infos['split_ix'] = loader.split_ix
        infos['vocab'] = loader.get_vocab()
    infos['opt'] = opt  # always record the current opts
    iteration = infos.get('iter', 0)
    epoch = infos.get('epoch', 0)
    # sanity check: an epoch-numbered checkpoint name must match the saved epoch
    if opt.name_append.isdigit() and int(opt.name_append) < 100:
        assert int(opt.name_append) == epoch, "mismatch between the model index and the real epoch number"
        epoch += 1
    print("==================start from {} epoch================".format(epoch))
    val_result_history = histories.get('val_result_history', {})
    loss_history = histories.get('loss_history', {})
    lr_history = histories.get('lr_history', {})
    ss_prob_history = histories.get('ss_prob_history', {})

    loader.iterators = infos.get('iterators', loader.iterators)
    start_Img_idx = loader.iterators['train']
    loader.split_ix = infos.get('split_ix', loader.split_ix)
    best_val_score = None  # initialized here so the comparison below never hits an unbound name
    if opt.load_best_score == 1:
        best_val_score = infos.get('best_val_score', None)
    opt.vocab = loader.get_vocab()
    model = models.setup(opt).cuda()
    del opt.vocab
    dp_model = torch.nn.DataParallel(model)
    lw_model = LossWrapper(model, opt)  # wrap the loss into the model
    dp_lw_model = torch.nn.DataParallel(lw_model)

    epoch_done = True
    # Ensure the model is in training mode
    dp_lw_model.train()
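    # Three optimizer regimes: Noam warmup/decay scheduling (transformer-style
    # models only), a plain optimizer wrapped in ReduceLROnPlateau, or a plain
    # optimizer with the manual epoch-wise decay applied in the loop below.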
    if opt.noamopt:
        assert opt.caption_model in ['transformer', 'aoa'], 'noamopt can only work with transformer'
        optimizer = utils.get_std_opt(model, factor=opt.noamopt_factor, warmup=opt.noamopt_warmup)
        optimizer._step = iteration
    elif opt.reduce_on_plateau:
        optimizer = utils.build_optimizer(model.parameters(), opt)
        optimizer = utils.ReduceLROnPlateau(optimizer, factor=0.5, patience=3)
    else:
        optimizer = utils.build_optimizer(model.parameters(), opt)
    # Load the optimizer state when resuming
    if vars(opt).get('start_from', None) is not None:
        optimizer_path = os.path.join(opt.start_from, 'optimizer' + name_append + '.pth')
        if os.path.isfile(optimizer_path):
            print("Loading optimizer............")
            optimizer.load_state_dict(torch.load(optimizer_path))
    def save_checkpoint(model, infos, optimizer, histories=None, append=''):
        if len(append) > 0:
            append = '_' + append
        # create checkpoint_path if it does not exist
        if not os.path.isdir(opt.checkpoint_path):
            os.makedirs(opt.checkpoint_path)
        checkpoint_path = os.path.join(opt.checkpoint_path, 'model%s.pth' % append)
        torch.save(model.state_dict(), checkpoint_path)
        print("Save model state to {}".format(checkpoint_path))
        optimizer_path = os.path.join(opt.checkpoint_path, 'optimizer%s.pth' % append)
        torch.save(optimizer.state_dict(), optimizer_path)
        print("Save model optimizer to {}".format(optimizer_path))
        infos_path = os.path.join(opt.checkpoint_path, 'infos%s.pkl' % append)
        with open(infos_path, 'wb') as f:
            utils.pickle_dump(infos, f)
        print("Save training information to {}".format(infos_path))
        if histories:
            histories_path = os.path.join(opt.checkpoint_path, 'histories_%s.pkl' % append)
            with open(histories_path, 'wb') as f:
                utils.pickle_dump(histories, f)
            print("Save training histories to {}".format(histories_path))
    try:
        while True:
            if epoch_done:
                if not opt.noamopt and not opt.reduce_on_plateau:
                    # Assign the learning rate
                    if epoch > opt.learning_rate_decay_start and opt.learning_rate_decay_start >= 0:
                        frac = (epoch - opt.learning_rate_decay_start) // opt.learning_rate_decay_every
                        decay_factor = opt.learning_rate_decay_rate ** frac
                        opt.current_lr = opt.learning_rate * decay_factor
                    else:
                        opt.current_lr = opt.learning_rate
                    utils.set_lr(optimizer, opt.current_lr)  # set the decayed rate
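                # Scheduled sampling: ss_prob is the probability that the decoder is
                # fed its own previous prediction instead of the ground-truth token;
                # it ramps up over epochs to narrow the train/inference mismatch.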
                # Assign the scheduled sampling prob
                if epoch > opt.scheduled_sampling_start and opt.scheduled_sampling_start >= 0:
                    frac = (epoch - opt.scheduled_sampling_start) // opt.scheduled_sampling_increase_every
                    opt.ss_prob = min(opt.scheduled_sampling_increase_prob * frac, opt.scheduled_sampling_max_prob)
                    model.ss_prob = opt.ss_prob
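                # Self-critical sequence training (SCST): once opt.self_critical_after
                # epochs have passed, the loss switches to a policy-gradient objective
                # whose reward (computed in misc.rewards) scores sampled captions
                # against a greedy-decoding baseline.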
                # Switch on self-critical training when scheduled
                if opt.self_critical_after != -1 and epoch >= opt.self_critical_after:
                    sc_flag = True
                    init_scorer(opt.cached_tokens)
                else:
                    sc_flag = False

                epoch_done = False
            print("Epoch {} training starts now!".format(epoch))
            with tqdm(total=len(loader.split_ix['train']), initial=start_Img_idx) as pbar:
                for i in range(start_Img_idx, len(loader.split_ix['train']), opt.batch_size):
                    start = time.time()
                    if (opt.use_warmup == 1) and (iteration < opt.noamopt_warmup):
                        opt.current_lr = opt.learning_rate * (iteration + 1) / opt.noamopt_warmup
                        utils.set_lr(optimizer, opt.current_lr)
                    # Load data from the train split
                    data = loader.get_batch('train')
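                    # Gradient accumulation: zero the gradients only every acc_steps
                    # iterations and scale the loss by 1/acc_steps, so the accumulated
                    # gradient matches a single large-batch step.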
                    if iteration % acc_steps == 0:
                        optimizer.zero_grad()
                    torch.cuda.synchronize()
                    start = time.time()
                    tmp = [data['fc_feats'], data['att_feats'], data['labels'], data['masks'], data['att_masks']]
                    tmp = [t if t is None else t.cuda() for t in tmp]
                    fc_feats, att_feats, labels, masks, att_masks = tmp

                    model_out = dp_lw_model(fc_feats, att_feats, labels, masks, att_masks, data['gts'],
                                            torch.arange(0, len(data['gts'])), sc_flag)
                    loss = model_out['loss'].mean()
                    loss_sp = loss / acc_steps
                    loss_sp.backward()
                    if (iteration + 1) % acc_steps == 0:
                        utils.clip_gradient(optimizer, opt.grad_clip)
                        optimizer.step()
                    torch.cuda.synchronize()
                    train_loss = loss.item()
                    end = time.time()
                    if not sc_flag:
                        pbar.set_description("iter {} (epoch {}), train_loss = {:.3f}, time/batch = {:.3f}"
                                             .format(iteration, epoch, train_loss, end - start))
                    else:
                        pbar.set_description("iter {} (epoch {}), avg_reward = {:.3f}, time/batch = {:.3f}"
                                             .format(iteration, epoch, model_out['reward'].mean(), end - start))

                    # Update the iteration and epoch
                    iteration += 1
                    pbar.update(opt.batch_size)
                    if data['bounds']['wrapped']:
                        # save after each epoch
                        save_checkpoint(model, infos, optimizer, append=str(epoch))
                        epoch += 1
                        epoch_done = True
                    # Write training statistics into the summary
                    if (iteration % opt.losses_log_every == 0) and opt.write_summary:
                        add_summary_value(tb_summary_writer, 'loss/train_loss', train_loss, iteration)
                        if opt.noamopt:
                            opt.current_lr = optimizer.rate()
                        elif opt.reduce_on_plateau:
                            opt.current_lr = optimizer.current_lr
                        add_summary_value(tb_summary_writer, 'hyperparam/learning_rate', opt.current_lr, iteration)
                        add_summary_value(tb_summary_writer, 'hyperparam/scheduled_sampling_prob', model.ss_prob, iteration)
                        if sc_flag:
                            add_summary_value(tb_summary_writer, 'avg_reward', model_out['reward'].mean(), iteration)
                        loss_history[iteration] = train_loss if not sc_flag else model_out['reward'].mean()
                        lr_history[iteration] = opt.current_lr
                        ss_prob_history[iteration] = model.ss_prob

                    # update infos
                    infos['iter'] = iteration
                    infos['epoch'] = epoch
                    infos['iterators'] = loader.iterators
                    infos['split_ix'] = loader.split_ix
                    # Evaluate on the validation set and save the model
                    # TODO: evaluate at each epoch instead of every save_checkpoint_every iterations
                    if (iteration % opt.save_checkpoint_every == 0) and eval_ and epoch > 20:
                        model_path = os.path.join(opt.checkpoint_path, 'model_itr%s.pth' % iteration)
                        eval_kwargs = {'split': 'val',
                                       'dataset': opt.input_json,
                                       'model': model_path}
                        eval_kwargs.update(vars(opt))
                        val_loss, predictions, lang_stats = eval_utils.eval_split(dp_model, lw_model.crit, loader, eval_kwargs)
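                        # ReduceLROnPlateau minimizes its metric, so CIDEr is negated;
                        # if no language metrics are available, fall back to the
                        # validation loss.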
                        if opt.reduce_on_plateau:
                            if 'CIDEr' in lang_stats:
                                optimizer.scheduler_step(-lang_stats['CIDEr'])
                            else:
                                optimizer.scheduler_step(val_loss)
                        # Write validation results into the summary
                        if opt.write_summary:
                            add_summary_value(tb_summary_writer, 'loss/validation loss', val_loss, iteration)
                            if lang_stats is not None:
                                bleu_dict = {}
                                for k, v in lang_stats.items():
                                    if 'Bleu' in k:
                                        bleu_dict[k] = v
                                if len(bleu_dict) > 0:
                                    tb_summary_writer.add_scalars('val/Bleu', bleu_dict, epoch)
                                for k, v in lang_stats.items():
                                    if 'Bleu' not in k:
                                        add_summary_value(tb_summary_writer, 'val/' + k, v, iteration)
                        val_result_history[iteration] = {'loss': val_loss, 'lang_stats': lang_stats, 'predictions': predictions}
                        # Save the model if it is improving on the validation result
                        if opt.language_eval == 1:
                            current_score = lang_stats['CIDEr']
                        else:
                            current_score = -val_loss
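                        # val_loss is negated above so that "higher is better" holds
                        # for both scoring regimes when comparing to best_val_score.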
                        best_flag = False
                        if best_val_score is None or current_score > best_val_score:
                            best_val_score = current_score
                            best_flag = True

                        # Dump miscellaneous information
                        infos['best_val_score'] = best_val_score
                        histories['val_result_history'] = val_result_history
                        histories['loss_history'] = loss_history
                        histories['lr_history'] = lr_history
                        histories['ss_prob_history'] = ss_prob_history
                        save_checkpoint(model, infos, optimizer, histories)

                        if best_flag:
                            save_checkpoint(model, infos, optimizer, append='best')
                            print("update best model at {} iteration--{} epoch".format(iteration, epoch))
                start_Img_idx = 0  # the next epoch restarts from the first image

            # Stop if reaching max epochs
            if epoch >= opt.max_epochs and opt.max_epochs != -1:
                print("epoch {} break all".format(epoch))
                save_checkpoint(model, infos, optimizer)
                if tb_summary_writer:
                    tb_summary_writer.close()
                print("============{} Training Done !==============".format('Refine' if opt.use_test or opt.use_val else ''))
                break
    except (RuntimeError, KeyboardInterrupt):
        print('Save ckpt on exception ...')
        # save_checkpoint prepends the '_' itself, so pass 'interrupt' rather
        # than '_interrupt' to avoid a doubled underscore in the filename
        save_checkpoint(model, infos, optimizer, append='interrupt')
        print('Save ckpt done.')
        stack_trace = traceback.format_exc()
        print(stack_trace)
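# Typical invocation (flags are defined in opts.py; the values below are
# illustrative, not defaults taken from this repository):
#   python train.py --caption_model aoa --batch_size 10 \
#       --checkpoint_path log/aoa --self_critical_after 25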
if __name__ == '__main__':
    opt = opts.parse_opt()
    print("========================start from {}.".format(opt.start_from))
    train(opt)