# General utils

import glob
import math
import os
import platform
import random
import re
import subprocess
import time
from pathlib import Path

import cv2
import numpy as np
import torch
import torchvision
import yaml

from utils.google_utils import gsutil_getsize
from utils.metrics import fitness
from utils.torch_utils import init_torch_seeds

# Settings
torch.set_printoptions(linewidth=320, precision=5, profile='long')  # format short g, %precision=5
np.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format})
cv2.setNumThreads(0)  # prevent OpenCV from multithreading (incompatible with PyTorch DataLoader)


def init_seeds(seed=0):
    random.seed(seed)
    np.random.seed(seed)
    init_torch_seeds(seed)


def get_latest_run(search_dir='.'):
    # Return path to most recent 'last.pt' in /runs (i.e. to --resume from)
    last_list = glob.glob(f'{search_dir}/**/last*.pt', recursive=True)
    return max(last_list, key=os.path.getctime) if last_list else ''


def check_git_status():
    # Suggest 'git pull' if repo is out of date
    if platform.system() in ['Linux', 'Darwin'] and not os.path.isfile('/.dockerenv'):
        s = subprocess.check_output('if [ -d .git ]; then git fetch && git status -uno; fi', shell=True).decode('utf-8')
        if 'Your branch is behind' in s:
            print(s[s.find('Your branch is behind'):s.find('\n\n')] + '\n')


def check_img_size(img_size, s=32):
    # Verify img_size is a multiple of stride s
    new_size = make_divisible(img_size, int(s))  # ceil gs-multiple
    if new_size != img_size:
        print('WARNING: --img-size %g must be multiple of max stride %g, updating to %g' % (img_size, s, new_size))
    return new_size
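
# Example (illustrative, not from the original source): an odd --img-size is rounded up
# to the next stride multiple, e.g. check_img_size(641, s=32) returns 672 and prints a warning.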


def check_file(file):
    # Search for file if not found
    if os.path.isfile(file) or file == '':
        return file
    else:
        files = glob.glob('./**/' + file, recursive=True)  # find file
        assert len(files), 'File Not Found: %s' % file  # assert file was found
        assert len(files) == 1, "Multiple files match '%s', specify exact path: %s" % (file, files)  # assert unique
        return files[0]  # return file


def check_dataset(dict):
    # Download dataset if not found locally
    val, s = dict.get('val'), dict.get('download')
    if val and len(val):
        val = [Path(x).resolve() for x in (val if isinstance(val, list) else [val])]  # val path
        if not all(x.exists() for x in val):
            print('\nWARNING: Dataset not found, nonexistent paths: %s' % [str(x) for x in val if not x.exists()])
            if s and len(s):  # download script
                print('Downloading %s ...' % s)
                if s.startswith('http') and s.endswith('.zip'):  # URL
                    f = Path(s).name  # filename
                    torch.hub.download_url_to_file(s, f)
                    r = os.system('unzip -q %s -d ../ && rm %s' % (f, f))  # unzip
                else:  # bash script
                    r = os.system(s)
                print('Dataset autodownload %s\n' % ('success' if r == 0 else 'failure'))  # analyze return value
            else:
                raise Exception('Dataset not found.')


def make_divisible(x, divisor):
    # Returns x evenly divisible by divisor
    return math.ceil(x / divisor) * divisor
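
# Example (illustrative, not from the original source): make_divisible(100, 32) returns 128,
# since 100 / 32 is rounded up to 4 stride-32 cells.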


def labels_to_class_weights(labels, nc=80):
    # Get class weights (inverse frequency) from training labels
    if labels[0] is None:  # no labels loaded
        return torch.Tensor()

    labels = np.concatenate(labels, 0)  # labels.shape = (866643, 5) for COCO
    classes = labels[:, 0].astype(int)  # labels = [class xywh]
    weights = np.bincount(classes, minlength=nc)  # occurrences per class

    # Prepend gridpoint count (for uCE training)
    # gpi = ((320 / 32 * np.array([1, 2, 4])) ** 2 * 3).sum()  # gridpoints per image
    # weights = np.hstack([gpi * len(labels) - weights.sum() * 9, weights * 9]) ** 0.5  # prepend gridpoints to start

    weights[weights == 0] = 1  # replace empty bins with 1
    weights = 1 / weights  # number of targets per class
    weights /= weights.sum()  # normalize
    return torch.from_numpy(weights)
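
# Example (illustrative, not from the original source): for labels containing classes
# [0, 0, 0, 1] and nc=2, the per-class counts are [3, 1], so the normalized
# inverse-frequency weights are [0.25, 0.75] -- the rarer class gets the larger weight.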


def labels_to_image_weights(labels, nc=80, class_weights=np.ones(80)):
    # Produces image weights based on class_weights and image contents
    class_counts = np.array([np.bincount(x[:, 0].astype(int), minlength=nc) for x in labels])
    image_weights = (class_weights.reshape(1, nc) * class_counts).sum(1)
    # index = random.choices(range(n), weights=image_weights, k=1)  # weight image sample
    return image_weights


def coco80_to_coco91_class():  # converts 80-index (val2014) to 91-index (paper)
    # https://tech.amikelive.com/node-718/what-object-categories-labels-are-in-coco-dataset/
    # a = np.loadtxt('data/coco.names', dtype='str', delimiter='\n')
    # b = np.loadtxt('data/coco_paper.names', dtype='str', delimiter='\n')
    # x1 = [list(a[i] == b).index(True) + 1 for i in range(80)]  # darknet to coco
    # x2 = [list(b[i] == a).index(True) if any(b[i] == a) else None for i in range(91)]  # coco to darknet
    x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34,
         35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
         64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90]
    return x


def xyxy2xywh(x):
    # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right
    y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
    y[:, 0] = (x[:, 0] + x[:, 2]) / 2  # x center
    y[:, 1] = (x[:, 1] + x[:, 3]) / 2  # y center
    y[:, 2] = x[:, 2] - x[:, 0]  # width
    y[:, 3] = x[:, 3] - x[:, 1]  # height
    return y


def xywh2xyxy(x):
    # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
    y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
    y[:, 0] = x[:, 0] - x[:, 2] / 2  # top left x
    y[:, 1] = x[:, 1] - x[:, 3] / 2  # top left y
    y[:, 2] = x[:, 0] + x[:, 2] / 2  # bottom right x
    y[:, 3] = x[:, 1] + x[:, 3] / 2  # bottom right y
    return y
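
# Example (illustrative, not from the original source): the two conversions are inverses, e.g.
# xyxy2xywh(np.array([[0., 0., 10., 20.]])) -> [[5., 10., 10., 20.]] and
# xywh2xyxy(np.array([[5., 10., 10., 20.]])) -> [[0., 0., 10., 20.]].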


def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None):
    # Rescale coords (xyxy) from img1_shape to img0_shape
    if ratio_pad is None:  # calculate from img0_shape
        gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1])  # gain = old / new
        pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2  # wh padding
    else:
        gain = ratio_pad[0][0]
        pad = ratio_pad[1]

    coords[:, [0, 2]] -= pad[0]  # x padding
    coords[:, [1, 3]] -= pad[1]  # y padding
    coords[:, :4] /= gain
    clip_coords(coords, img0_shape)
    return coords
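
# Example (illustrative sketch, not from the original source): a detection at (0, 80, 640, 560)
# in a 640x640 letterboxed image maps back to (0, 0, 640, 480) in the original 480x640 frame,
# since gain = 1.0 and the 80 px of vertical padding is subtracted before clipping.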


def clip_coords(boxes, img_shape):
    # Clip xyxy bounding boxes to image shape (height, width)
    boxes[:, 0].clamp_(0, img_shape[1])  # x1
    boxes[:, 1].clamp_(0, img_shape[0])  # y1
    boxes[:, 2].clamp_(0, img_shape[1])  # x2
    boxes[:, 3].clamp_(0, img_shape[0])  # y2


def bbox_iou(box1, box2, x1y1x2y2=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-9):
    # Returns the IoU of box1 to box2. box1 is 4, box2 is nx4
    box2 = box2.T

    # Get the coordinates of bounding boxes
    if x1y1x2y2:  # x1, y1, x2, y2 = box1
        b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
        b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]
    else:  # transform from xywh to xyxy
        b1_x1, b1_x2 = box1[0] - box1[2] / 2, box1[0] + box1[2] / 2
        b1_y1, b1_y2 = box1[1] - box1[3] / 2, box1[1] + box1[3] / 2
        b2_x1, b2_x2 = box2[0] - box2[2] / 2, box2[0] + box2[2] / 2
        b2_y1, b2_y2 = box2[1] - box2[3] / 2, box2[1] + box2[3] / 2

    # Intersection area
    inter = (torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1)).clamp(0) * \
            (torch.min(b1_y2, b2_y2) - torch.max(b1_y1, b2_y1)).clamp(0)

    # Union area
    w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps
    w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps
    union = w1 * h1 + w2 * h2 - inter + eps

    iou = inter / union
    if GIoU or DIoU or CIoU:
        cw = torch.max(b1_x2, b2_x2) - torch.min(b1_x1, b2_x1)  # convex (smallest enclosing box) width
        ch = torch.max(b1_y2, b2_y2) - torch.min(b1_y1, b2_y1)  # convex height
        if CIoU or DIoU:  # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1
            c2 = cw ** 2 + ch ** 2 + eps  # convex diagonal squared
            rho2 = ((b2_x1 + b2_x2 - b1_x1 - b1_x2) ** 2 +
                    (b2_y1 + b2_y2 - b1_y1 - b1_y2) ** 2) / 4  # center distance squared
            if DIoU:
                return iou - rho2 / c2  # DIoU
            elif CIoU:  # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47
                v = (4 / math.pi ** 2) * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2)
                with torch.no_grad():
                    alpha = v / ((1 + eps) - iou + v)
                return iou - (rho2 / c2 + v * alpha)  # CIoU
        else:  # GIoU https://arxiv.org/pdf/1902.09630.pdf
            c_area = cw * ch + eps  # convex area
            return iou - (c_area - union) / c_area  # GIoU
    else:
        return iou  # IoU
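
# Example (illustrative, not from the original source): two 10x10 boxes overlapping by half
# share 50 of 150 total pixels, so plain IoU is ~1/3:
# bbox_iou(torch.tensor([0., 0., 10., 10.]), torch.tensor([[5., 0., 15., 10.]])) -> tensor([0.3333])
# Passing GIoU/DIoU/CIoU=True returns the corresponding penalized variant instead of plain IoU.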


def box_iou(box1, box2):
    # https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py
    """
    Return intersection-over-union (Jaccard index) of boxes.
    Both sets of boxes are expected to be in (x1, y1, x2, y2) format.
    Arguments:
        box1 (Tensor[N, 4])
        box2 (Tensor[M, 4])
    Returns:
        iou (Tensor[N, M]): the NxM matrix containing the pairwise
            IoU values for every element in boxes1 and boxes2
    """

    def box_area(box):
        # box = 4xn
        return (box[2] - box[0]) * (box[3] - box[1])

    area1 = box_area(box1.T)
    area2 = box_area(box2.T)

    # inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2)
    inter = (torch.min(box1[:, None, 2:], box2[:, 2:]) - torch.max(box1[:, None, :2], box2[:, :2])).clamp(0).prod(2)
    return inter / (area1[:, None] + area2 - inter)  # iou = inter / (area1 + area2 - inter)
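
# Example (illustrative, not from the original source): with N=1 reference box and M=2 candidates,
# box_iou returns a 1x2 matrix, e.g.
# box_iou(torch.tensor([[0., 0., 10., 10.]]),
#         torch.tensor([[0., 0., 10., 10.], [20., 20., 30., 30.]])) -> tensor([[1., 0.]])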


def wh_iou(wh1, wh2):
    # Returns the nxm IoU matrix. wh1 is nx2, wh2 is mx2
    wh1 = wh1[:, None]  # [N,1,2]
    wh2 = wh2[None]  # [1,M,2]
    inter = torch.min(wh1, wh2).prod(2)  # [N,M]
    return inter / (wh1.prod(2) + wh2.prod(2) - inter)  # iou = inter / (area1 + area2 - inter)
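
# Example (illustrative, not from the original source): wh_iou compares shapes as if the boxes
# shared a common origin, which suits anchor-to-label shape matching, e.g.
# wh_iou(torch.tensor([[10., 10.]]), torch.tensor([[10., 20.], [5., 5.]])) -> tensor([[0.5000, 0.2500]])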


def non_max_suppression(prediction, conf_thres=0.1, iou_thres=0.6, classes=None, agnostic=False, labels=()):
    """Performs Non-Maximum Suppression (NMS) on inference results

    Returns:
         detections with shape: nx6 (x1, y1, x2, y2, conf, cls)
    """
    nc = prediction[0].shape[1] - 5  # number of classes
    xc = prediction[..., 4] > conf_thres  # candidates

    # Settings
    min_wh, max_wh = 2, 4096  # (pixels) minimum and maximum box width and height
    max_det = 300  # maximum number of detections per image
    time_limit = 10.0  # seconds to quit after
    redundant = True  # require redundant detections
    multi_label = nc > 1  # multiple labels per box (adds 0.5ms/img)
    merge = False  # use merge-NMS

    t = time.time()
    output = [torch.zeros(0, 6)] * prediction.shape[0]
    for xi, x in enumerate(prediction):  # image index, image inference
        # Apply constraints
        # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0  # width-height
        x = x[xc[xi]]  # confidence

        # Cat apriori labels if autolabelling
        if labels and len(labels[xi]):
            l = labels[xi]
            v = torch.zeros((len(l), nc + 5), device=x.device)
            v[:, :4] = l[:, 1:5]  # box
            v[:, 4] = 1.0  # conf
            v[range(len(l)), l[:, 0].long() + 5] = 1.0  # cls
            x = torch.cat((x, v), 0)

        # If none remain process next image
        if not x.shape[0]:
            continue

        # Compute conf
        x[:, 5:] *= x[:, 4:5]  # conf = obj_conf * cls_conf

        # Box (center x, center y, width, height) to (x1, y1, x2, y2)
        box = xywh2xyxy(x[:, :4])

        # Detections matrix nx6 (xyxy, conf, cls)
        if multi_label:
            i, j = (x[:, 5:] > conf_thres).nonzero(as_tuple=False).T
            x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1)
        else:  # best class only
            conf, j = x[:, 5:].max(1, keepdim=True)
            x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres]

        # Filter by class
        if classes:
            x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)]

        # Apply finite constraint
        # if not torch.isfinite(x).all():
        #     x = x[torch.isfinite(x).all(1)]

        # If none remain process next image
        n = x.shape[0]  # number of boxes
        if not n:
            continue

        # Sort by confidence
        # x = x[x[:, 4].argsort(descending=True)]

        # Batched NMS
        c = x[:, 5:6] * (0 if agnostic else max_wh)  # classes
        boxes, scores = x[:, :4] + c, x[:, 4]  # boxes (offset by class), scores
        i = torchvision.ops.nms(boxes, scores, iou_thres)  # NMS
        if i.shape[0] > max_det:  # limit detections
            i = i[:max_det]
        if merge and (1 < n < 3E3):  # Merge NMS (boxes merged using weighted mean)
            # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4)
            iou = box_iou(boxes[i], boxes) > iou_thres  # iou matrix
            weights = iou * scores[None]  # box weights
            x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True)  # merged boxes
            if redundant:
                i = i[iou.sum(1) > 1]  # require redundancy

        output[xi] = x[i]
        if (time.time() - t) > time_limit:
            break  # time limit exceeded

    return output
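
# Example usage (illustrative sketch, not from the original source; `model` and `img` are assumed
# placeholders for a YOLO-style detector and its preprocessed input batch):
# pred = model(img)[0]  # raw output, shape [batch, num_boxes, 5 + nc] as (x, y, w, h, obj_conf, cls_conf...)
# det = non_max_suppression(pred, conf_thres=0.25, iou_thres=0.45)[0]  # nx6 (xyxy, conf, cls) for image 0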


def strip_optimizer(f='weights/best.pt', s=''):  # from utils.general import *; strip_optimizer()
    # Strip optimizer from 'f' to finalize training, optionally save as 's'
    x = torch.load(f, map_location=torch.device('cpu'))
    x['optimizer'] = None
    x['training_results'] = None
    x['epoch'] = -1
    x['model'].half()  # to FP16
    for p in x['model'].parameters():
        p.requires_grad = False
    torch.save(x, s or f)
    mb = os.path.getsize(s or f) / 1E6  # filesize
    print('Optimizer stripped from %s,%s %.1fMB' % (f, (' saved as %s,' % s) if s else '', mb))


def print_mutation(hyp, results, yaml_file='hyp_evolved.yaml', bucket=''):
    # Print mutation results to evolve.txt (for use with train.py --evolve)
    a = '%10s' * len(hyp) % tuple(hyp.keys())  # hyperparam keys
    b = '%10.3g' * len(hyp) % tuple(hyp.values())  # hyperparam values
    c = '%10.4g' * len(results) % results  # results (P, R, mAP@0.5, mAP@0.5:0.95, val_losses x 3)
    print('\n%s\n%s\nEvolved fitness: %s\n' % (a, b, c))

    if bucket:
        url = 'gs://%s/evolve.txt' % bucket
        if gsutil_getsize(url) > (os.path.getsize('evolve.txt') if os.path.exists('evolve.txt') else 0):
            os.system('gsutil cp %s .' % url)  # download evolve.txt if larger than local

    with open('evolve.txt', 'a') as f:  # append result
        f.write(c + b + '\n')
    x = np.unique(np.loadtxt('evolve.txt', ndmin=2), axis=0)  # load unique rows
    x = x[np.argsort(-fitness(x))]  # sort
    np.savetxt('evolve.txt', x, '%10.3g')  # save sort by fitness

    # Save yaml
    for i, k in enumerate(hyp.keys()):
        hyp[k] = float(x[0, i + 7])
    with open(yaml_file, 'w') as f:
        results = tuple(x[0, :7])
        c = '%10.4g' * len(results) % results  # results (P, R, mAP@0.5, mAP@0.5:0.95, val_losses x 3)
        f.write('# Hyperparameter Evolution Results\n# Generations: %g\n# Metrics: ' % len(x) + c + '\n\n')
        yaml.dump(hyp, f, sort_keys=False)

    if bucket:
        os.system('gsutil cp evolve.txt %s gs://%s' % (yaml_file, bucket))  # upload


def apply_classifier(x, model, img, im0):
    # Applies a second stage classifier to yolo outputs
    im0 = [im0] if isinstance(im0, np.ndarray) else im0
    for i, d in enumerate(x):  # per image
        if d is not None and len(d):
            d = d.clone()

            # Reshape and pad cutouts
            b = xyxy2xywh(d[:, :4])  # boxes
            b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1)  # rectangle to square
            b[:, 2:] = b[:, 2:] * 1.3 + 30  # pad
            d[:, :4] = xywh2xyxy(b).long()

            # Rescale boxes from img_size to im0 size
            scale_coords(img.shape[2:], d[:, :4], im0[i].shape)

            # Classes
            pred_cls1 = d[:, 5].long()
            ims = []
            for j, a in enumerate(d):  # per item
                cutout = im0[i][int(a[1]):int(a[3]), int(a[0]):int(a[2])]
                im = cv2.resize(cutout, (224, 224))  # BGR
                # cv2.imwrite('test%i.jpg' % j, cutout)

                im = im[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, to 3x416x416
                im = np.ascontiguousarray(im, dtype=np.float32)  # uint8 to float32
                im /= 255.0  # 0 - 255 to 0.0 - 1.0
                ims.append(im)

            pred_cls2 = model(torch.Tensor(ims).to(d.device)).argmax(1)  # classifier prediction
            x[i] = x[i][pred_cls1 == pred_cls2]  # retain matching class detections

    return x


def increment_path(path, exist_ok=True, sep=''):
    # Increment path, i.e. runs/exp --> runs/exp{sep}0, runs/exp{sep}1 etc.
    path = Path(path)  # os-agnostic
    if (path.exists() and exist_ok) or (not path.exists()):
        return str(path)
    else:
        dirs = glob.glob(f"{path}{sep}*")  # similar paths
        matches = [re.search(rf"%s{sep}(\d+)" % path.stem, d) for d in dirs]
        i = [int(m.groups()[0]) for m in matches if m]  # indices
        n = max(i) + 1 if i else 2  # increment number
        return f"{path}{sep}{n}"  # update path
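
# Example (illustrative, not from the original source): with existing directories runs/exp and
# runs/exp2, increment_path('runs/exp', exist_ok=False) returns 'runs/exp3'; if runs/exp does not
# exist yet, the path is returned unchanged.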


def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True):
    # Resize and pad image to new_shape, preserving aspect ratio (minimum stride-32 rectangle when auto=True)
    shape = img.shape[:2]  # current shape [height, width]
    if isinstance(new_shape, int):
        new_shape = (new_shape, new_shape)

    # Scale ratio (new / old)
    r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
    if not scaleup:  # only scale down, do not scale up (for better test mAP)
        r = min(r, 1.0)

    # Compute padding
    ratio = r, r  # width, height ratios
    new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
    dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]  # wh padding
    if auto:  # minimum rectangle
        dw, dh = np.mod(dw, 32), np.mod(dh, 32)  # wh padding
    elif scaleFill:  # stretch
        dw, dh = 0.0, 0.0
        new_unpad = (new_shape[1], new_shape[0])
        ratio = new_shape[1] / shape[1], new_shape[0] / shape[0]  # width, height ratios

    dw /= 2  # divide padding into 2 sides
    dh /= 2

    if shape[::-1] != new_unpad:  # resize
        img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
    top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
    left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
    img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)  # add border
    return img, ratio, (dw, dh)
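
# Example usage (illustrative sketch, not from the original source; 'image.jpg' is a placeholder path):
# img = cv2.imread('image.jpg')  # e.g. a 720x1280 BGR image
# img_lb, ratio, (dw, dh) = letterbox(img, new_shape=640)  # -> 384x640 when auto=True (stride-32 rectangle)
# With auto=False the image is instead padded out to the full 640x640 square.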