Refactor Python code (#13448)
Signed-off-by: Glenn Jocher <glenn.jocher@ultralytics.com>
Co-authored-by: UltralyticsAssistant <web@ultralytics.com>
This commit is contained in:
parent
6a234f3639
commit
1b26838def
22 changed files with 81 additions and 101 deletions
|
|
@@ -209,11 +209,12 @@ class Exporter:
|
|||
if self.args.optimize:
|
||||
assert not ncnn, "optimize=True not compatible with format='ncnn', i.e. use optimize=False"
|
||||
assert self.device.type == "cpu", "optimize=True not compatible with cuda devices, i.e. use device='cpu'"
|
||||
if edgetpu and not LINUX:
|
||||
raise SystemError("Edge TPU export only supported on Linux. See https://coral.ai/docs/edgetpu/compiler/")
|
||||
elif edgetpu and self.args.batch != 1: # see github.com/ultralytics/ultralytics/pull/13420
|
||||
LOGGER.warning("WARNING ⚠️ Edge TPU export requires batch size 1, setting batch=1.")
|
||||
self.args.batch = 1
|
||||
if edgetpu:
|
||||
if not LINUX:
|
||||
raise SystemError("Edge TPU export only supported on Linux. See https://coral.ai/docs/edgetpu/compiler")
|
||||
elif self.args.batch != 1: # see github.com/ultralytics/ultralytics/pull/13420
|
||||
LOGGER.warning("WARNING ⚠️ Edge TPU export requires batch size 1, setting batch=1.")
|
||||
self.args.batch = 1
|
||||
if isinstance(model, WorldModel):
|
||||
LOGGER.warning(
|
||||
"WARNING ⚠️ YOLOWorld (original version) export is not supported to any format.\n"
|
||||
|
|
|
|||
|
|
@@ -742,11 +742,10 @@ class Model(nn.Module):
|
|||
|
||||
if hasattr(self.model, "names"):
|
||||
return check_class_names(self.model.names)
|
||||
else:
|
||||
if not self.predictor: # export formats will not have predictor defined until predict() is called
|
||||
self.predictor = self._smart_load("predictor")(overrides=self.overrides, _callbacks=self.callbacks)
|
||||
self.predictor.setup_model(model=self.model, verbose=False)
|
||||
return self.predictor.model.names
|
||||
if not self.predictor: # export formats will not have predictor defined until predict() is called
|
||||
self.predictor = self._smart_load("predictor")(overrides=self.overrides, _callbacks=self.callbacks)
|
||||
self.predictor.setup_model(model=self.model, verbose=False)
|
||||
return self.predictor.model.names
|
||||
|
||||
@property
|
||||
def device(self) -> torch.device:
|
||||
|
|
|
|||
|
|
@@ -319,13 +319,13 @@ class BasePredictor:
|
|||
frame = self.dataset.count
|
||||
else:
|
||||
match = re.search(r"frame (\d+)/", s[i])
|
||||
frame = int(match.group(1)) if match else None # 0 if frame undetermined
|
||||
frame = int(match[1]) if match else None # 0 if frame undetermined
|
||||
|
||||
self.txt_path = self.save_dir / "labels" / (p.stem + ("" if self.dataset.mode == "image" else f"_{frame}"))
|
||||
string += "%gx%g " % im.shape[2:]
|
||||
result = self.results[i]
|
||||
result.save_dir = self.save_dir.__str__() # used in other locations
|
||||
string += result.verbose() + f"{result.speed['inference']:.1f}ms"
|
||||
string += f"{result.verbose()}{result.speed['inference']:.1f}ms"
|
||||
|
||||
# Add predictions to image
|
||||
if self.args.save or self.args.show:
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue