ultralytics 8.3.64 new torchvision.ops access in model YAMLs (#18680)
Signed-off-by: Glenn Jocher <glenn.jocher@ultralytics.com>
Co-authored-by: UltralyticsAssistant <web@ultralytics.com>
Co-authored-by: Glenn Jocher <glenn.jocher@ultralytics.com>
Parent: a8e2464a9c
Commit: 673b43ce17

4 changed files with 11 additions and 5 deletions
```diff
@@ -8,7 +8,7 @@ keywords: YOLOv8, real-time object detection, YOLO series, Ultralytics, computer
 ## Overview
 
-YOLOv8 is the latest iteration in the YOLO series of real-time object detectors, offering cutting-edge performance in terms of accuracy and speed. Building upon the advancements of previous YOLO versions, YOLOv8 introduces new features and optimizations that make it an ideal choice for various [object detection](https://www.ultralytics.com/glossary/object-detection) tasks in a wide range of applications.
+YOLOv8 was released by Ultralytics on January 10th, 2023, offering cutting-edge performance in terms of accuracy and speed. Building upon the advancements of previous YOLO versions, YOLOv8 introduced new features and optimizations that make it an ideal choice for various [object detection](https://www.ultralytics.com/glossary/object-detection) tasks in a wide range of applications.
 
 
 
```
```diff
@@ -1,6 +1,6 @@
 # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
 
-__version__ = "8.3.63"
+__version__ = "8.3.64"
 
 import os
```
```diff
@@ -1139,10 +1139,10 @@ class TorchVision(nn.Module):
         else:
             self.m = torchvision.models.__dict__[model](pretrained=bool(weights))
         if unwrap:
-            layers = list(self.m.children())[:-truncate]
+            layers = list(self.m.children())
             if isinstance(layers[0], nn.Sequential):  # Second-level for some models like EfficientNet, Swin
                 layers = [*list(layers[0].children()), *layers[1:]]
-            self.m = nn.Sequential(*layers)
+            self.m = nn.Sequential(*(layers[:-truncate] if truncate else layers))
             self.split = split
         else:
             self.split = False
```
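Note on the `TorchVision` hunk: the old code sliced `list(self.m.children())[:-truncate]` before flattening, so `truncate=0` produced an empty list (`layers[:-0]` is `layers[:0]`); the new code flattens any second-level `nn.Sequential` first and only slices when `truncate` is truthy. A minimal standalone sketch of the new behavior, with EfficientNet-B0 as an assumed example model (not something the commit itself references):

```python
import torch.nn as nn
import torchvision

# Sketch of the updated unwrap/truncate logic above, outside the Ultralytics class.
m = torchvision.models.efficientnet_b0(weights=None)
layers = list(m.children())
if isinstance(layers[0], nn.Sequential):  # second-level unwrap, e.g. EfficientNet, Swin
    layers = [*list(layers[0].children()), *layers[1:]]

truncate = 2  # drop the last two flattened layers (pooling + classifier here)
backbone = nn.Sequential(*(layers[:-truncate] if truncate else layers))
print(len(layers), "->", len(backbone))  # 11 -> 9 for EfficientNet-B0

# With truncate=0 the whole flattened model is kept, whereas the previous
# `list(m.children())[:-truncate]` evaluated to `[:-0]`, i.e. an empty list.
```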
```diff
@@ -955,7 +955,13 @@ def parse_model(d, ch, verbose=True):  # model_dict, input_channels(3)
     ch = [ch]
     layers, save, c2 = [], [], ch[-1]  # layers, savelist, ch out
     for i, (f, n, m, args) in enumerate(d["backbone"] + d["head"]):  # from, number, module, args
-        m = getattr(torch.nn, m[3:]) if "nn." in m else globals()[m]  # get module
+        m = (
+            getattr(torch.nn, m[3:])
+            if "nn." in m
+            else getattr(__import__("torchvision").ops, m[16:])
+            if "torchvision.ops." in m
+            else globals()[m]
+        )  # get module
         for j, a in enumerate(args):
             if isinstance(a, str):
                 with contextlib.suppress(ValueError):
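```

This `parse_model` hunk is what the commit title refers to: a module name in a model YAML that starts with `torchvision.ops.` is now resolved against the `torchvision.ops` namespace (slicing off the 16-character prefix) instead of being looked up in `globals()`. A minimal sketch of the lookup, with `torchvision.ops.DeformConv2d` as an assumed example of a YAML module entry:

```python
import torchvision

# Mirror of the new resolution path in parse_model; the name and args below
# are assumed examples of what a model YAML could now contain.
name = "torchvision.ops.DeformConv2d"
if "torchvision.ops." in name:
    cls = getattr(__import__("torchvision").ops, name[16:])  # len("torchvision.ops.") == 16
layer = cls(16, 32, kernel_size=3, padding=1)  # args would come from the YAML's `args` list
print(type(layer).__name__)  # DeformConv2d
```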