Code Refactor ruff check --fix --extend-select I (#13672)
Signed-off-by: Glenn Jocher <glenn.jocher@ultralytics.com>
Co-authored-by: UltralyticsAssistant <web@ultralytics.com>
This commit is contained in: parent c8514a6754, commit 6227d8f8a1
6 changed files with 30 additions and 24 deletions
.github/workflows/publish.yml (vendored): 18 lines changed
@@ -88,7 +88,7 @@ jobs:
GITHUB_TOKEN = os.getenv('GITHUB_TOKEN')
CURRENT_TAG = os.getenv('CURRENT_TAG')
PREVIOUS_TAG = os.getenv('PREVIOUS_TAG')

# Check for required environment variables
if not all([OPENAI_AZURE_API_KEY, OPENAI_AZURE_ENDPOINT, OPENAI_AZURE_API_VERSION, GITHUB_TOKEN, CURRENT_TAG, PREVIOUS_TAG]):
    print(OPENAI_AZURE_API_KEY)
@@ -98,24 +98,24 @@ jobs:
    print(CURRENT_TAG)
    print(PREVIOUS_TAG)
    raise ValueError("One or more required environment variables are missing.")

latest_tag = f"v{CURRENT_TAG}"
previous_tag = f"v{PREVIOUS_TAG}"
repo = 'ultralytics/ultralytics'
headers = {"Authorization": f"token {GITHUB_TOKEN}", "Accept": "application/vnd.github.v3.diff"}

# Get the diff between the tags
url = f"https://api.github.com/repos/{repo}/compare/{previous_tag}...{latest_tag}"
response = requests.get(url, headers=headers)
diff = response.text if response.status_code == 200 else f"Failed to get diff: {response.content}"

# Set up OpenAI client
client = openai.AzureOpenAI(
    api_key=OPENAI_AZURE_API_KEY,
    api_version=OPENAI_AZURE_API_VERSION,
    azure_endpoint=OPENAI_AZURE_ENDPOINT
)

# Prepare messages for OpenAI completion
messages = [
    {
@@ -131,17 +131,17 @@ jobs:
        f"\n\nHere's the release diff:\n\n{diff[:96000]}",
    }
]

try:
    completion = client.chat.completions.create(model="gpt-4o-2024-05-13", messages=messages)
    summary = completion.choices[0].message.content.strip()
except openai.error.OpenAIError as e:
    print(f"Failed to generate summary: {e}")
    raise

# Get the latest commit message
commit_message = subprocess.run(['git', 'log', '-1', '--pretty=%B'], check=True, text=True, capture_output=True).stdout.split("\n")[0].strip()

# Prepare release data
release = {
    'tag_name': latest_tag,
@@ -150,7 +150,7 @@ jobs:
    'draft': False,
    'prerelease': False
}

# Create the release on GitHub
release_url = f"https://api.github.com/repos/{repo}/releases"
release_response = requests.post(release_url, headers=headers, data=json.dumps(release))
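For context, the script embedded in this workflow follows a simple pattern: pull the compare diff between the two release tags from the GitHub API, then ask an Azure OpenAI chat deployment to summarize it. Below is a minimal standalone sketch of that pattern, not the workflow itself; the repo, tags, prompt, and environment-variable names are placeholders, and error handling is omitted.

```python
import os

import openai
import requests

# Placeholder values; the real workflow reads these from repository secrets.
repo = "ultralytics/ultralytics"
previous_tag, latest_tag = "v8.2.0", "v8.2.1"
headers = {"Authorization": f"token {os.environ['GITHUB_TOKEN']}", "Accept": "application/vnd.github.v3.diff"}

# 1. Fetch the raw diff between the two release tags.
diff = requests.get(f"https://api.github.com/repos/{repo}/compare/{previous_tag}...{latest_tag}", headers=headers).text

# 2. Ask an Azure OpenAI chat deployment for a human-readable summary of the diff.
client = openai.AzureOpenAI(
    api_key=os.environ["OPENAI_AZURE_API_KEY"],
    api_version=os.environ["OPENAI_AZURE_API_VERSION"],
    azure_endpoint=os.environ["OPENAI_AZURE_ENDPOINT"],
)
completion = client.chat.completions.create(
    model="gpt-4o-2024-05-13",  # deployment name, as used in the workflow
    messages=[{"role": "user", "content": f"Summarize this release diff:\n\n{diff[:96000]}"}],
)
print(completion.choices[0].message.content.strip())
```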
docs/en/reference/utils/callbacks/mlflow.md

@@ -11,6 +11,10 @@ keywords: MLflow, Ultralytics YOLO, logging, metrics, parameters, model artifact

 <br><br>

+## ::: ultralytics.utils.callbacks.mlflow.sanitize_dict
+
+<br><br>
+
 ## ::: ultralytics.utils.callbacks.mlflow.on_pretrain_routine_end

 <br><br>
ultralytics/cfg/datasets/lvis.yaml

@@ -221,8 +221,7 @@ names:
   204: cape
   205: cappuccino/coffee cappuccino
   206: car/car automobile/auto/auto automobile/automobile
-  207: railcar/railcar part of a train/railway car/railway car part of a train/railroad
-    car/railroad car part of a train
+  207: railcar/railcar part of a train/railway car/railway car part of a train/railroad car/railroad car part of a train
   208: elevator car
   209: car battery/automobile battery
   210: identity card
@@ -241,8 +240,7 @@ names:
   223: cast/plaster cast/plaster bandage
   224: cat
   225: cauliflower
-  226: cayenne/cayenne spice/cayenne pepper/cayenne pepper spice/red pepper/red pepper
-    spice
+  226: cayenne/cayenne spice/cayenne pepper/cayenne pepper spice/red pepper/red pepper spice
   227: CD player
   228: celery
   229: cellular telephone/cellular phone/cellphone/mobile phone/smart phone
@@ -258,8 +256,7 @@ names:
   239: chessboard
   240: chicken/chicken animal
   241: chickpea/garbanzo
-  242: chili/chili vegetable/chili pepper/chili pepper vegetable/chilli/chilli vegetable/chilly/chilly
-    vegetable/chile/chile vegetable
+  242: chili/chili vegetable/chili pepper/chili pepper vegetable/chilli/chilli vegetable/chilly/chilly vegetable/chile/chile vegetable
   243: chime/gong
   244: chinaware
   245: crisp/crisp potato chip/potato chip
@@ -1061,8 +1058,7 @@ names:
   1041: sweater
   1042: sweatshirt
   1043: sweet potato
-  1044: swimsuit/swimwear/bathing suit/swimming costume/bathing costume/swimming trunks/bathing
-    trunks
+  1044: swimsuit/swimwear/bathing suit/swimming costume/bathing costume/swimming trunks/bathing trunks
   1045: sword
   1046: syringe
   1047: Tabasco sauce
ultralytics/data/explorer/gui/dash.py

@@ -259,7 +259,7 @@ def layout():

     with col2:
         similarity_form(selected_imgs)
-        display_labels = st.checkbox("Labels", value=False, key="display_labels")
+        st.checkbox("Labels", value=False, key="display_labels")
         utralytics_explorer_docs_callback()
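The change drops an unused local binding: because the checkbox is created with a key, Streamlit keeps its value in session state, which is presumably how the dashboard reads it back elsewhere. A minimal sketch of that pattern (the reader variable is illustrative, not from dash.py):

```python
import streamlit as st

# With a key, the widget's value lives in session state, so no local binding is needed.
st.checkbox("Labels", value=False, key="display_labels")

# Elsewhere in the app the value can be read back from session state.
show_labels = st.session_state.get("display_labels", False)
```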
ultralytics/nn/tasks.py

@@ -293,8 +293,12 @@ class DetectionModel(BaseModel):
         if isinstance(m, Detect):  # includes all Detect subclasses like Segment, Pose, OBB, WorldDetect
             s = 256  # 2x min stride
             m.inplace = self.inplace
-            forward = lambda x: self.forward(x)[0] if isinstance(m, (Segment, Pose, OBB)) else self.forward(x)
-            m.stride = torch.tensor([s / x.shape[-2] for x in forward(torch.zeros(1, ch, s, s))])  # forward
+
+            def _forward(x):
+                """Performs a forward pass through the model, handling different Detect subclass types accordingly."""
+                return self.forward(x)[0] if isinstance(m, (Segment, Pose, OBB)) else self.forward(x)
+
+            m.stride = torch.tensor([s / x.shape[-2] for x in _forward(torch.zeros(1, ch, s, s))])  # forward
             self.stride = m.stride
             m.bias_init()  # only run once
         else:
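This hunk swaps the assigned lambda for a named, documented helper (assigning a lambda to a name is the pattern flake8/ruff flag as E731) without changing behaviour: a dummy 256x256 input is pushed through the head once, and each output's downsampling factor becomes its stride. A rough illustration of the stride computation with hypothetical feature-map sizes:

```python
import torch

s = 256  # nominal input size for the dry-run forward pass

# Hypothetical head outputs for a model with strides 8, 16 and 32:
# 256/8 = 32, 256/16 = 16, 256/32 = 8 spatial cells per side.
outputs = [torch.zeros(1, 64, 32, 32), torch.zeros(1, 128, 16, 16), torch.zeros(1, 256, 8, 8)]

stride = torch.tensor([s / x.shape[-2] for x in outputs])
print(stride)  # tensor([ 8., 16., 32.])
```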
ultralytics/utils/callbacks/mlflow.py

@@ -34,11 +34,13 @@ try:
     from pathlib import Path

     PREFIX = colorstr("MLflow: ")
-    SANITIZE = lambda x: {k.replace("(", "").replace(")", ""): float(v) for k, v in x.items()}

 except (ImportError, AssertionError):
     mlflow = None


+def sanitize_dict(x):
+    """Sanitize dictionary keys by removing parentheses and converting values to floats."""
+    return {k.replace("(", "").replace(")", ""): float(v) for k, v in x.items()}
+
+
 def on_pretrain_routine_end(trainer):
     """
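The new sanitize_dict helper does what the old SANITIZE lambda did: it strips parentheses from metric keys, which MLflow does not accept in metric names, and casts values to float. A small usage sketch with one real-looking Ultralytics metric key:

```python
def sanitize_dict(x):
    """Sanitize dictionary keys by removing parentheses and converting values to floats."""
    return {k.replace("(", "").replace(")", ""): float(v) for k, v in x.items()}

print(sanitize_dict({"metrics/mAP50(B)": 0.613}))  # {'metrics/mAP50B': 0.613}
```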
@@ -88,8 +90,8 @@ def on_train_epoch_end(trainer):
     if mlflow:
         mlflow.log_metrics(
             metrics={
-                **SANITIZE(trainer.lr),
-                **SANITIZE(trainer.label_loss_items(trainer.tloss, prefix="train")),
+                **sanitize_dict(trainer.lr),
+                **sanitize_dict(trainer.label_loss_items(trainer.tloss, prefix="train")),
             },
             step=trainer.epoch,
         )
@@ -98,7 +100,7 @@ def on_train_epoch_end(trainer):
 def on_fit_epoch_end(trainer):
     """Log training metrics at the end of each fit epoch to MLflow."""
     if mlflow:
-        mlflow.log_metrics(metrics=SANITIZE(trainer.metrics), step=trainer.epoch)
+        mlflow.log_metrics(metrics=sanitize_dict(trainer.metrics), step=trainer.epoch)


 def on_train_end(trainer):