Ultralytics Code Refactor https://ultralytics.com/actions (#16940)
Co-authored-by: Glenn Jocher <glenn.jocher@ultralytics.com>
parent 0027e0837c
commit a622b404ef

19 changed files with 32 additions and 33 deletions

.github/workflows/cla.yml (2 changes)

@@ -30,7 +30,7 @@ jobs:
   env:
     GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
     # Must be repository secret PAT
-    PERSONAL_ACCESS_TOKEN: ${{ secrets.PERSONAL_ACCESS_TOKEN }}
+    PERSONAL_ACCESS_TOKEN: ${{ secrets._GITHUB_TOKEN }}
   with:
     path-to-signatures: "signatures/version1/cla.json"
     path-to-document: "https://docs.ultralytics.com/help/CLA" # CLA document

.github/workflows/docker.yaml (2 changes)

@@ -182,7 +182,7 @@ jobs:
 steps:
   - name: Trigger Additional GitHub Actions
     env:
-      GH_TOKEN: ${{ secrets.PERSONAL_ACCESS_TOKEN }}
+      GH_TOKEN: ${{ secrets._GITHUB_TOKEN }}
     run: |
       sleep 60
       gh workflow run deploy_cloud_run.yml \

.github/workflows/docs.yml (4 changes)

@@ -34,7 +34,7 @@ jobs:
   uses: actions/checkout@v4
   with:
     repository: ${{ github.event.pull_request.head.repo.full_name || github.repository }}
-    token: ${{ secrets.PERSONAL_ACCESS_TOKEN || secrets.GITHUB_TOKEN }}
+    token: ${{ secrets._GITHUB_TOKEN }}
     ref: ${{ github.head_ref || github.ref }}
     fetch-depth: 0
 - name: Set up Python

@@ -94,5 +94,5 @@ jobs:
 else
   LATEST_HASH=$(git rev-parse --short=7 HEAD)
   git commit -m "Update Docs for 'ultralytics ${{ steps.check_pypi.outputs.version }} - $LATEST_HASH'"
-  git push https://${{ secrets.PERSONAL_ACCESS_TOKEN }}@github.com/ultralytics/docs.git gh-pages
+  git push https://${{ secrets._GITHUB_TOKEN }}@github.com/ultralytics/docs.git gh-pages
 fi

.github/workflows/format.yml (5 changes)

@@ -20,15 +20,14 @@ jobs:
 - name: Run Ultralytics Formatting
   uses: ultralytics/actions@main
   with:
-    token: ${{ secrets.PERSONAL_ACCESS_TOKEN || secrets.GITHUB_TOKEN }} # note GITHUB_TOKEN automatically generated
+    token: ${{ secrets._GITHUB_TOKEN }} # note GITHUB_TOKEN automatically generated
     labels: true # autolabel issues and PRs
     python: true # format Python code and docstrings
     prettier: true # format YAML, JSON, Markdown and CSS
     spelling: true # check spelling
     links: false # check broken links
     summary: true # print PR summary with GPT4o (requires 'openai_api_key')
-    openai_azure_api_key: ${{ secrets.OPENAI_AZURE_API_KEY }}
-    openai_azure_endpoint: ${{ secrets.OPENAI_AZURE_ENDPOINT }}
+    openai_api_key: ${{ secrets.OPENAI_API_KEY }}
     first_issue_response: |
       👋 Hello @${{ github.actor }}, thank you for your interest in Ultralytics 🚀! We recommend a visit to the [Docs](https://docs.ultralytics.com) for new users where you can find many [Python](https://docs.ultralytics.com/usage/python/) and [CLI](https://docs.ultralytics.com/usage/cli/) usage examples and where many of the most common questions may already be answered.

.github/workflows/merge-main-into-prs.yml (2 changes)

@@ -33,7 +33,7 @@ jobs:
 import os
 import time

-g = Github("${{ secrets.PERSONAL_ACCESS_TOKEN }}")
+g = Github("${{ secrets._GITHUB_TOKEN }}")
 repo = g.get_repo("${{ github.repository }}")

 # Fetch the default branch name

.github/workflows/publish.yml (6 changes)

@@ -23,7 +23,7 @@ jobs:
 - name: Checkout code
   uses: actions/checkout@v4
   with:
-    token: ${{ secrets.PERSONAL_ACCESS_TOKEN || secrets.GITHUB_TOKEN }} # use your PAT here
+    token: ${{ secrets._GITHUB_TOKEN }} # use your PAT here
 - name: Git config
   run: |
     git config --global user.name "UltralyticsAssistant"

@@ -103,7 +103,7 @@ jobs:
 if: (github.event_name == 'push' || github.event.inputs.pypi == 'true') && steps.check_pypi.outputs.increment == 'True'
 env:
   OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
-  GITHUB_TOKEN: ${{ secrets.PERSONAL_ACCESS_TOKEN || secrets.GITHUB_TOKEN }}
+  GITHUB_TOKEN: ${{ secrets._GITHUB_TOKEN }}
   CURRENT_TAG: ${{ steps.check_pypi.outputs.current_tag }}
   PREVIOUS_TAG: ${{ steps.check_pypi.outputs.previous_tag }}
 run: |

@@ -111,7 +111,7 @@ jobs:
 shell: bash
 - name: Extract PR Details
   env:
-    GH_TOKEN: ${{ secrets.PERSONAL_ACCESS_TOKEN || secrets.GITHUB_TOKEN }}
+    GH_TOKEN: ${{ secrets._GITHUB_TOKEN }}
   run: |
     # Check if the event is a pull request or pull_request_target
     if [ "${{ github.event_name }}" = "pull_request" ] || [ "${{ github.event_name }}" = "pull_request_target" ]; then

@@ -210,7 +210,7 @@ These features help in tracking experiments, optimizing models, and collaboratin
 After running your training script with W&B integration:

 1. A link to your W&B dashboard will be provided in the console output.
-2. Click on the link or go to [wandb.ai](https://wandb.ai) and log in to your account.
+2. Click on the link or go to [wandb.ai](https://wandb.ai/) and log in to your account.
 3. Navigate to your project to view detailed metrics, visualizations, and model performance data.

 The dashboard offers insights into your model's training process, allowing you to analyze and improve your YOLO11 models effectively.
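
The docs hunk above refers to running a training script with W&B logging enabled. A minimal sketch of such a run, assuming the `wandb` package is installed, `wandb login` has been completed, and the integration is toggled through the Ultralytics settings; the model and dataset names below are only examples:

```python
from ultralytics import YOLO, settings

# Assumption: the W&B integration is switched on via the Ultralytics settings key "wandb".
settings.update({"wandb": True})

# Train a small YOLO11 model; with W&B enabled, a dashboard link is printed to the console.
model = YOLO("yolo11n.pt")  # example model
model.train(data="coco8.yaml", epochs=3, imgsz=640)  # example dataset and hyperparameters
```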

@@ -639,7 +639,7 @@ def smart_value(v):
     else:
         try:
             return eval(v)
-        except:  # noqa E722
+        except Exception:
             return v
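
The `except:` → `except Exception:` substitution in the hunk above recurs throughout the rest of this commit. A short sketch of the difference (the helper below is hypothetical, not part of the diff): a bare `except` also catches `KeyboardInterrupt` and `SystemExit`, whereas `except Exception` lets them propagate, so Ctrl-C and interpreter shutdown still work.

```python
def parse_loosely(value: str):
    """Hypothetical helper mirroring the pattern above: evaluate a string, else return it unchanged."""
    try:
        return eval(value)  # may raise NameError, SyntaxError, ValueError, ...
    except Exception:  # unlike a bare `except:`, KeyboardInterrupt and SystemExit are not swallowed
        return value
```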

@@ -65,7 +65,7 @@ def exif_size(img: Image.Image):
         rotation = exif.get(274, None)  # the EXIF key for the orientation tag is 274
         if rotation in {6, 8}:  # rotation 270 or 90
             s = s[1], s[0]
-    except:  # noqa E722
+    except Exception:
         pass
     return s

@@ -46,7 +46,7 @@ def default_class_names(data=None):
     if data:
         try:
             return yaml_load(check_yaml(data))["names"]
-        except:  # noqa E722
+        except Exception:
             pass
     return {i: f"class{i}" for i in range(999)}  # return default if above errors

@@ -963,7 +963,6 @@ def parse_model(d, ch, verbose=True):  # model_dict, input_channels(3)
                 args[j] = locals()[a] if a in locals() else ast.literal_eval(a)
             except ValueError:
                 pass
-
         n = n_ = max(round(n * depth), 1) if n > 1 else n  # depth gain
         if m in {
             Classify,

@@ -1102,7 +1101,7 @@ def guess_model_scale(model_path):
         (str): The size character of the model's scale, which can be n, s, m, l, or x.
     """
     try:
-        return re.search(r"yolo[v]?\d+([nslmx])", Path(model_path).stem).group(1)  # n, s, m, l, or x
+        return re.search(r"yolo[v]?\d+([nslmx])", Path(model_path).stem).group(1)  # noqa, returns n, s, m, l, or x
     except AttributeError:
         return ""

@@ -1139,7 +1138,7 @@ def guess_model_task(model):
     if isinstance(model, dict):
         try:
             return cfg2task(model)
-        except:  # noqa E722
+        except Exception:
             pass

     # Guess from PyTorch model

@@ -1147,12 +1146,12 @@ def guess_model_task(model):
         for x in "model.args", "model.model.args", "model.model.model.args":
             try:
                 return eval(x)["task"]
-            except:  # noqa E722
+            except Exception:
                 pass
         for x in "model.yaml", "model.model.yaml", "model.model.model.yaml":
             try:
                 return cfg2task(eval(x))
-            except:  # noqa E722
+            except Exception:
                 pass

         for m in model.modules():

@@ -61,11 +61,11 @@ class Analytics(BaseSolution):
         self.extract_tracks(im0)  # Extract tracks

         if self.type == "line":
-            for box in self.boxes:
+            for _ in self.boxes:
                 self.total_counts += 1
             im0 = self.update_graph(frame_number=frame_number)
             self.total_counts = 0
-        elif self.type == "pie" or self.type == "bar" or self.type == "area":
+        elif self.type in {"pie", "bar", "area"}:
             self.clswise_count = {}
             for box, cls in zip(self.boxes, self.clss):
                 if self.names[int(cls)] in self.clswise_count:

@@ -52,7 +52,8 @@ class Heatmap(ObjectCounter):
         Returns:
             im0 (ndarray): Processed image for further usage
         """
-        self.heatmap = np.zeros_like(im0, dtype=np.float32) * 0.99 if not self.initialized else self.heatmap
+        if not self.initialized:
+            self.heatmap = np.zeros_like(im0, dtype=np.float32) * 0.99
         self.initialized = True  # Initialize heatmap only once

         self.annotator = Annotator(im0, line_width=self.line_width)  # Initialize annotator

@@ -526,7 +526,7 @@ def read_device_model() -> str:
     try:
         with open("/proc/device-tree/model") as f:
             return f.read()
-    except:  # noqa E722
+    except Exception:
         return ""

@@ -584,7 +584,7 @@ def is_docker() -> bool:
     try:
         with open("/proc/self/cgroup") as f:
             return "docker" in f.read()
-    except:  # noqa E722
+    except Exception:
         return False

@@ -623,7 +623,7 @@ def is_online() -> bool:
         for dns in ("1.1.1.1", "8.8.8.8"):  # check Cloudflare and Google DNS
             socket.create_connection(address=(dns, 80), timeout=2.0).close()
             return True
-    except:  # noqa E722
+    except Exception:
         return False

@@ -50,7 +50,7 @@ def _log_tensorboard_graph(trainer):
             LOGGER.info(f"{PREFIX}model graph visualization added ✅")
             return

-    except:  # noqa E722
+    except Exception:
         # Fallback to TorchScript export steps (RTDETR)
         try:
             model = deepcopy(de_parallel(trainer.model))

@@ -277,7 +277,7 @@ def check_latest_pypi_version(package_name="ultralytics"):
         response = requests.get(f"https://pypi.org/pypi/{package_name}/json", timeout=3)
         if response.status_code == 200:
             return response.json()["info"]["version"]
-    except:  # noqa E722
+    except Exception:
         return None

@@ -299,7 +299,7 @@ def check_pip_update_available():
                 f"Update with 'pip install -U ultralytics'"
             )
         return True
-    except:  # noqa E722
+    except Exception:
         pass
     return False

@@ -715,7 +715,7 @@ def git_describe(path=ROOT):  # path must be a directory
     """Return human-readable git description, i.e. v5.0-5-g3e25f1e https://git-scm.com/docs/git-describe."""
     try:
         return subprocess.check_output(f"git -C {path} describe --tags --long --always", shell=True).decode()[:-1]
-    except:  # noqa E722
+    except Exception:
         return ""

@@ -60,7 +60,7 @@ def is_url(url, check=False):
             with request.urlopen(url) as response:
                 return response.getcode() == 200  # check if exists online
         return True
-    except:  # noqa E722
+    except Exception:
         return False

@@ -1117,7 +1117,7 @@ def plot_images(
                     im[y : y + h, x : x + w, :][mask] = (
                         im[y : y + h, x : x + w, :][mask] * 0.4 + np.array(color) * 0.6
                     )
-            except:  # noqa E722
+            except Exception:
                 pass
         annotator.fromarray(im)
     if not save:

@@ -119,7 +119,7 @@ def get_cpu_info():
         info = cpuinfo.get_cpu_info()  # info dict
         string = info.get(k[0] if k[0] in info else k[1] if k[1] in info else k[2], "unknown")
         PERSISTENT_CACHE["cpu_info"] = string.replace("(R)", "").replace("CPU ", "").replace("@ ", "")
-    except:  # noqa E722
+    except Exception:
         pass
     return PERSISTENT_CACHE.get("cpu_info", "unknown")