ultralytics 8.0.190 add thop>=0.1.1 to requirements.txt (#5162)

Co-authored-by: JohanWesto <Johan.westo@gmail.com>
Co-authored-by: Muhammad Rizwan Munawar <62513924+RizwanMunawar@users.noreply.github.com>
Co-authored-by: StephenBeirlaen <11806615+StephenBeirlaen@users.noreply.github.com>
This commit is contained in:
Glenn Jocher 2023-09-29 21:06:14 +02:00 committed by GitHub
parent 092b58a8cf
commit 9aaa5d5ed0
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
7 changed files with 28 additions and 11 deletions

View file

@@ -791,14 +791,14 @@ def v8_transforms(dataset, imgsz, hyp, stretch=False):
# Classification augmentations -----------------------------------------------------------------------------------------
def classify_transforms(size=224, mean=(0.0, 0.0, 0.0), std=(1.0, 1.0, 1.0)): # IMAGENET_MEAN, IMAGENET_STD
# Transforms to apply if albumentations not installed
def classify_transforms(size=224, rect=False, mean=(0.0, 0.0, 0.0), std=(1.0, 1.0, 1.0)): # IMAGENET_MEAN, IMAGENET_STD
"""Transforms to apply if albumentations not installed."""
if not isinstance(size, int):
raise TypeError(f'classify_transforms() size {size} must be integer, not (list, tuple)')
transforms = [ClassifyLetterBox(size, auto=True) if rect else CenterCrop(size), ToTensor()]
if any(mean) or any(std):
return T.Compose([CenterCrop(size), ToTensor(), T.Normalize(mean, std, inplace=True)])
else:
return T.Compose([CenterCrop(size), ToTensor()])
transforms.append(T.Normalize(mean, std, inplace=True))
return T.Compose(transforms)
def hsv2colorjitter(h, s, v):
@@ -864,9 +864,9 @@ class ClassifyLetterBox:
imh, imw = im.shape[:2]
r = min(self.h / imh, self.w / imw) # ratio of new/old
h, w = round(imh * r), round(imw * r) # resized image
hs, ws = (math.ceil(x / self.stride) * self.stride for x in (h, w)) if self.auto else self.h, self.w
hs, ws = (math.ceil(x / self.stride) * self.stride for x in (h, w)) if self.auto else (self.h, self.w)
top, left = round((hs - h) / 2 - 0.1), round((ws - w) / 2 - 0.1)
im_out = np.full((self.h, self.w, 3), 114, dtype=im.dtype)
im_out = np.full((hs, ws, 3), 114, dtype=im.dtype)
im_out[top:top + h, left:left + w] = cv2.resize(im, (w, h), interpolation=cv2.INTER_LINEAR)
return im_out

View file

@@ -222,7 +222,7 @@ class ClassificationDataset(torchvision.datasets.ImageFolder):
self.cache_disk = cache == 'disk'
self.samples = self.verify_images() # filter out bad images
self.samples = [list(x) + [Path(x[0]).with_suffix('.npy'), None] for x in self.samples] # file, index, npy, im
self.torch_transforms = classify_transforms(args.imgsz)
self.torch_transforms = classify_transforms(args.imgsz, rect=args.rect)
self.album_transforms = classify_albumentations(
augment=augment,
size=args.imgsz,