train.py

import copy
import time
from typing import List, Tuple, Optional

import torch
import torch.optim as optim
from torch import nn
from torch.utils.data import DataLoader

from data import AudioVideo
from kissing_detector import KissingDetector

ExperimentResults = Tuple[nn.Module, List[float], List[float]]


def _get_params_to_update(model: nn.Module,
                          feature_extract: bool) -> List[nn.parameter.Parameter]:
    params_to_update = model.parameters()
    if feature_extract:
        print('Params to update')
        params_to_update = []
        for name, param in model.named_parameters():
            if param.requires_grad:
                params_to_update.append(param)
                print("*", name)
    else:
        print('Updating ALL params')
    return params_to_update


def train_kd(data_path_base: str,
             conv_model_name: Optional[str],
             num_epochs: int,
             feature_extract: bool,
             batch_size: int,
             use_vggish: bool = True,
             num_workers: int = 4,
             shuffle: bool = True,
             lr: float = 0.001,
             momentum: float = 0.9) -> ExperimentResults:
    num_classes = 2
    kd = KissingDetector(conv_model_name, num_classes, feature_extract, use_vggish=use_vggish)
    params_to_update = _get_params_to_update(kd, feature_extract)

    datasets = {set_: AudioVideo(f'{data_path_base}/{set_}') for set_ in ['train', 'val']}
    dataloaders_dict = {x: DataLoader(datasets[x],
                                      batch_size=batch_size,
                                      shuffle=shuffle,
                                      num_workers=num_workers)
                        for x in ['train', 'val']}

    optimizer_ft = optim.SGD(params_to_update, lr=lr, momentum=momentum)
    # Set up the loss function
    criterion = nn.CrossEntropyLoss()
    return train_model(kd,
                       dataloaders_dict, criterion, optimizer_ft, num_epochs=num_epochs,
                       is_inception=(conv_model_name == "inception"))


def train_model(model, dataloaders, criterion, optimizer, num_epochs=25, is_inception=False):
    since = time.time()

    val_acc_history = []
    val_f1_history = []

    best_model_wts = copy.deepcopy(model.state_dict())
    best_acc = 0.0
    best_f1 = 0.0

    # Detect if we have a GPU available and move the model there
    # (the inputs are moved to the same device inside the loop)
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model = model.to(device)

    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        print('-' * 10)

        # Each epoch has a training and validation phase
        for phase in ['train', 'val']:
            if phase == 'train':
                model.train()  # Set model to training mode
            else:
                model.eval()  # Set model to evaluate mode

            running_loss = 0.0
            running_corrects = 0
            running_tp = 0
            running_fp = 0
            running_fn = 0

            # Iterate over data.
            for a, v, labels in dataloaders[phase]:
                a = a.to(device)
                v = v.to(device)
                labels = labels.to(device)

                # zero the parameter gradients
                optimizer.zero_grad()

                # forward
                # track history only if in train
                with torch.set_grad_enabled(phase == 'train'):
                    # Get model outputs and calculate loss.
                    # Special case for inception: in train mode it has an auxiliary
                    # output, and the loss is the sum of the final output's loss and
                    # a weighted auxiliary loss; in eval we only use the final output.
                    if is_inception and phase == 'train':
                        # https://discuss.pytorch.org/t/how-to-optimize-inception-model-with-auxiliary-classifiers/7958
                        outputs, aux_outputs = model(a, v)
                        loss1 = criterion(outputs, labels)
                        loss2 = criterion(aux_outputs, labels)
                        loss = loss1 + 0.4 * loss2
                    else:
                        outputs = model(a, v)
                        loss = criterion(outputs, labels)

                    _, preds = torch.max(outputs, 1)

                    # backward + optimize only if in training phase
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()

                # statistics (class 1 is the positive class)
                running_loss += loss.item() * a.size(0)
                running_corrects += torch.sum(preds == labels.data)
                # true positives: predicted 1 where the label is 1
                running_tp += torch.sum((preds == labels.data)[labels.data == 1])
                # false positives: predicted 1 where the label is 0
                running_fp += torch.sum((preds != labels.data)[labels.data == 0])
                # false negatives: predicted 0 where the label is 1
                running_fn += torch.sum((preds != labels.data)[labels.data == 1])
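                # Worked example: with labels = [1, 1, 0, 0] and
                # preds = [1, 0, 1, 0], the masks above give tp = 1, fp = 1,
                # fn = 1, so precision = recall = 0.5 and F1 = 0.5.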

            n = len(dataloaders[phase].dataset)
            epoch_loss = running_loss / n
            epoch_acc = float(running_corrects) / n
            tp = float(running_tp)
            fp = float(running_fp)
            fn = float(running_fn)
            # Guard against division by zero when a phase produces no
            # positive predictions or contains no positive labels.
            p = tp / (tp + fp) if tp + fp > 0 else 0.0
            r = tp / (tp + fn) if tp + fn > 0 else 0.0
            epoch_f1 = 2 * p * r / (p + r) if p + r > 0 else 0.0

            print('{} Loss: {:.4f} F1: {:.4f} Acc: {:.4f}'.format(phase, epoch_loss, epoch_f1, epoch_acc))

            # deep copy the model with the best validation F1
            if phase == 'val' and epoch_acc > best_acc:
                best_acc = epoch_acc
            if phase == 'val' and epoch_f1 > best_f1:
                best_f1 = epoch_f1
                best_model_wts = copy.deepcopy(model.state_dict())
            if phase == 'val':
                val_acc_history.append(epoch_acc)
                val_f1_history.append(epoch_f1)

        print()

    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
    print('Best val F1 : {:.4f}'.format(best_f1))
    print('Best val Acc: {:.4f}'.format(best_acc))

    # load best model weights
    model.load_state_dict(best_model_wts)
    return model, val_acc_history, val_f1_history
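

# ---------------------------------------------------------------------------
# Usage sketch: a minimal example of how train_kd might be invoked. The data
# directory layout ('data/train' and 'data/val' subfolders consumed by
# AudioVideo) and the conv model name ('resnet' here) are assumptions; check
# KissingDetector for the names it actually accepts.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    model, val_acc, val_f1 = train_kd(
        data_path_base='data',      # assumed root containing train/ and val/
        conv_model_name='resnet',   # hypothetical; use a name KissingDetector supports
        num_epochs=10,
        feature_extract=True,       # only update the newly added layers
        batch_size=32,
    )
    # train_kd returns the weights with the best validation F1
    torch.save(model.state_dict(), 'kissing_detector.pt')
    print('Validation F1 history:', val_f1)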