# train.py
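"""Training loop for the audio-video KissingDetector classifier.

`train_kd` builds the detector, datasets, dataloaders, and SGD optimizer;
`train_model` runs the standard train/val loop, tracks validation accuracy
and F1, and restores the weights of the best-F1 epoch before returning.
"""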
import copy
import time
from typing import List, Optional, Tuple

import torch
import torch.optim as optim
import torch.utils.data
from torch import nn

from data import AudioVideo
from kissing_detector import KissingDetector

ExperimentResults = Tuple[Optional[nn.Module], List[float], List[float]]
def _get_params_to_update(model: nn.Module,
                          feature_extract: bool) -> List[nn.parameter.Parameter]:
    """Collect the parameters the optimizer should update.

    When feature extracting, only parameters with requires_grad=True are
    optimized; otherwise every parameter is.
    """
    params_to_update = list(model.parameters())
    if feature_extract:
        print('Params to update')
        params_to_update = []
        for name, param in model.named_parameters():
            if param.requires_grad:
                params_to_update.append(param)
                print("*", name)
    else:
        print('Updating ALL params')
    return params_to_update
def train_kd(data_path_base: str,
             conv_model_name: Optional[str],
             num_epochs: int,
             feature_extract: bool,
             batch_size: int,
             use_vggish: bool = True,
             num_workers: int = 4,
             shuffle: bool = True,
             lr: float = 0.001,
             momentum: float = 0.9) -> ExperimentResults:
    num_classes = 2
    try:
        kd = KissingDetector(conv_model_name, num_classes, feature_extract, use_vggish=use_vggish)
    except ValueError:
        # The requested conv model / VGGish combination is not valid
        return None, [-1.0], [-1.0]

    params_to_update = _get_params_to_update(kd, feature_extract)

    datasets = {set_: AudioVideo(f'{data_path_base}/{set_}') for set_ in ['train', 'val']}
    dataloaders_dict = {x: torch.utils.data.DataLoader(datasets[x],
                                                       batch_size=batch_size,
                                                       shuffle=shuffle,
                                                       num_workers=num_workers)
                        for x in ['train', 'val']}

    optimizer_ft = optim.SGD(params_to_update, lr=lr, momentum=momentum)

    # Set up the loss function
    criterion = nn.CrossEntropyLoss()

    return train_model(kd,
                       dataloaders_dict, criterion, optimizer_ft, num_epochs=num_epochs,
                       is_inception=(conv_model_name == "inception"))
def train_model(model, dataloaders, criterion, optimizer, num_epochs=25, is_inception=False):
    since = time.time()

    val_acc_history = []
    val_f1_history = []

    best_model_wts = copy.deepcopy(model.state_dict())
    best_acc = 0.0
    best_f1 = 0.0

    # Detect if we have a GPU available and move the model onto it, so the
    # model and the batches below live on the same device
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model = model.to(device)
    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        print('-' * 10)

        # Each epoch has a training and a validation phase
        for phase in ['train', 'val']:
            if phase == 'train':
                model.train()  # Set model to training mode
            else:
                model.eval()   # Set model to evaluate mode

            running_loss = 0.0
            running_corrects = 0
            running_tp = 0
            running_fp = 0
            running_fn = 0

            # Iterate over the audio (a) / video (v) batches.
            for a, v, labels in dataloaders[phase]:
                a = a.to(device)
                v = v.to(device)
                labels = labels.to(device)

                # zero the parameter gradients
                optimizer.zero_grad()

                # forward; track history only in train
                with torch.set_grad_enabled(phase == 'train'):
                    # Get model outputs and calculate loss.
                    # Special case for inception: in training it has an auxiliary
                    # output, and the loss is the weighted sum of the final and
                    # auxiliary losses; in testing only the final output is used.
                    if is_inception and phase == 'train':
                        # https://discuss.pytorch.org/t/how-to-optimize-inception-model-with-auxiliary-classifiers/7958
                        outputs, aux_outputs = model(a, v)
                        loss1 = criterion(outputs, labels)
                        loss2 = criterion(aux_outputs, labels)
                        loss = loss1 + 0.4 * loss2
                    else:
                        outputs = model(a, v)
                        loss = criterion(outputs, labels)

                    _, preds = torch.max(outputs, 1)

                    # backward + optimize only in the training phase
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()

                # statistics
                running_loss += loss.item() * a.size(0)
                running_corrects += torch.sum(preds == labels.data)
                # Label 1 is the positive class: a miss on a positive example
                # is a false negative, and a wrong prediction on a negative
                # example is a false positive.
                running_tp += torch.sum((preds == labels.data)[labels.data == 1])
                running_fn += torch.sum((preds != labels.data)[labels.data == 1])
                running_fp += torch.sum((preds != labels.data)[labels.data == 0])
            n = len(dataloaders[phase].dataset)
            epoch_loss = running_loss / n
            epoch_acc = running_corrects.double() / n

            tp = running_tp.double()
            fp = running_fp.double()
            fn = running_fn.double()
            # Guard against division by zero when a phase has no predicted or
            # no actual positives; F1 is reported as 0 in that case.
            zero = torch.tensor(0.0)
            p = tp / (tp + fp) if tp + fp > 0 else zero
            r = tp / (tp + fn) if tp + fn > 0 else zero
            epoch_f1 = 2 * p * r / (p + r) if p + r > 0 else zero
            print('{} Loss: {:.4f} F1: {:.4f} Acc: {:.4f}'.format(phase, epoch_loss, epoch_f1, epoch_acc))

            # deep copy the model; the best weights are selected by validation F1
            if phase == 'val' and epoch_acc > best_acc:
                best_acc = epoch_acc
            if phase == 'val' and epoch_f1 > best_f1:
                best_f1 = epoch_f1
                best_model_wts = copy.deepcopy(model.state_dict())
            if phase == 'val':
                val_acc_history.append(float(epoch_acc))
                val_f1_history.append(float(epoch_f1))

        print()

    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
    print('Best val F1 : {:.4f}'.format(best_f1))
    print('Best val Acc : {:.4f}'.format(best_acc))

    # load best model weights
    model.load_state_dict(best_model_wts)
    return model, val_acc_history, val_f1_history
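

# Example invocation (a minimal sketch): the data path, model name, and
# checkpoint filename below are assumptions, not values defined in this file.
# AudioVideo is expected to read `<data_path_base>/train` and
# `<data_path_base>/val`, matching the dataset construction in train_kd, and
# conv_model_name must be one the KissingDetector constructor accepts.
if __name__ == '__main__':
    model, acc_history, f1_history = train_kd(data_path_base='data',      # hypothetical path
                                              conv_model_name='resnet',   # hypothetical model name
                                              num_epochs=10,
                                              feature_extract=True,
                                              batch_size=32)
    if model is not None:  # train_kd returns None for an invalid model combination
        torch.save(model.state_dict(), 'kissing_detector.pt')
        print('val acc history:', acc_history)
        print('val F1 history :', f1_history)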