Use pathlib in DOTA ops (#7552)

Signed-off-by: Glenn Jocher <glenn.jocher@ultralytics.com>

parent f6309b8e70
commit 9d4ffa43bc
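The commit's recurring pattern, reduced to a minimal sketch with hypothetical values: pathlib.Path composition replaces os.path string joining, and the resulting object carries its own .exists() and .glob() methods.

import os
from pathlib import Path

data_root, split = "DOTAv2", "train"  # hypothetical values
im_dir_old = os.path.join(data_root, f"images/{split}")  # string-based joining
im_dir_new = Path(data_root) / "images" / split          # composable Path object
assert str(im_dir_new) == im_dir_old  # same location on POSIX; Path adds .exists(), .glob(), ...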
@@ -32,7 +32,7 @@ RUN pip install --no-cache -e ".[export]" lancedb --extra-index-url https://down
 RUN yolo export model=tmp/yolov8n.pt format=edgetpu imgsz=32
 RUN yolo export model=tmp/yolov8n.pt format=ncnn imgsz=32
 # Requires <= Python 3.10, bug with paddlepaddle==2.5.0 https://github.com/PaddlePaddle/X2Paddle/issues/991
-# RUN pip install --no-cache paddlepaddle==2.4.2 x2paddle
+# RUN pip install --no-cache paddlepaddle>=2.6.0 x2paddle

 # Remove exported models
 RUN rm -rf tmp
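A side note on the commented install line, should it ever be re-enabled: the version specifier would need quoting, because an unquoted > is interpreted as shell output redirection inside a Dockerfile RUN, i.e. `RUN pip install --no-cache "paddlepaddle>=2.6.0" x2paddle`.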
@@ -1,7 +1,6 @@
 # Ultralytics YOLO 🚀, AGPL-3.0 license

 import itertools
-import os
 from glob import glob
 from math import ceil
 from pathlib import Path
@@ -73,9 +72,9 @@ def load_yolo_dota(data_root, split="train"):
             - val
     """
     assert split in ["train", "val"]
-    im_dir = os.path.join(data_root, f"images/{split}")
-    assert Path(im_dir).exists(), f"Can't find {im_dir}, please check your data root."
-    im_files = glob(os.path.join(data_root, f"images/{split}/*"))
+    im_dir = Path(data_root) / "images" / split
+    assert im_dir.exists(), f"Can't find {im_dir}, please check your data root."
+    im_files = glob(str(Path(data_root) / "images" / split / "*"))
     lb_files = img2label_paths(im_files)
     annos = []
     for im_file, lb_file in zip(im_files, lb_files):
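glob.glob takes a string pattern, hence the str(...) wrapper around the composed Path. A pure-pathlib alternative exists (a sketch below, not what this commit does), at the cost of yielding Path objects that downstream helpers such as img2label_paths may not expect:

from glob import glob
from pathlib import Path

root = Path("DOTAv2")  # hypothetical dataset root
im_files = glob(str(root / "images" / "train" / "*"))                    # the commit's approach: str paths
im_files_alt = [str(p) for p in (root / "images" / "train").glob("*")]   # equivalent pathlib form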
@@ -94,7 +93,7 @@ def get_windows(im_size, crop_sizes=[1024], gaps=[200], im_rate_thr=0.6, eps=0.0
     Args:
         im_size (tuple): Original image size, (h, w).
         crop_sizes (List(int)): Crop size of windows.
-        gaps (List(int)): Gap between each crops.
+        gaps (List(int)): Gap between crops.
         im_rate_thr (float): Threshold of windows areas divided by image ares.
     """
     h, w = im_size
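For context on crop_sizes/gaps: windows are laid out along each axis with stride crop_size - gap, and the final window is clamped to the image border. A minimal one-axis sketch of the assumed behavior, not the function's exact code:

from math import ceil

def window_starts(length, crop_size, gap):
    """Start offsets of overlapping crops along one axis (illustrative sketch)."""
    step = crop_size - gap  # consecutive windows overlap by `gap` pixels
    n = 1 if length <= crop_size else ceil((length - crop_size) / step) + 1
    starts = [i * step for i in range(n)]
    if len(starts) > 1 and starts[-1] + crop_size > length:
        starts[-1] = length - crop_size  # clamp the last window inside the image
    return starts

# window_starts(3000, 1024, 200) -> [0, 824, 1648, 1976]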
@@ -173,7 +172,7 @@ def crop_and_save(anno, windows, window_objs, im_dir, lb_dir):
         patch_im = im[y_start:y_stop, x_start:x_stop]
         ph, pw = patch_im.shape[:2]

-        cv2.imwrite(os.path.join(im_dir, f"{new_name}.jpg"), patch_im)
+        cv2.imwrite(str(Path(im_dir) / f"{new_name}.jpg"), patch_im)
         label = window_objs[i]
         if len(label) == 0:
             continue
@@ -182,7 +181,7 @@ def crop_and_save(anno, windows, window_objs, im_dir, lb_dir):
         label[:, 1::2] /= pw
         label[:, 2::2] /= ph

-        with open(os.path.join(lb_dir, f"{new_name}.txt"), "w") as f:
+        with open(Path(lb_dir) / f"{new_name}.txt", "w") as f:
             for lb in label:
                 formatted_coords = ["{:.6g}".format(coord) for coord in lb[1:]]
                 f.write(f"{int(lb[0])} {' '.join(formatted_coords)}\n")
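The "{:.6g}" format keeps six significant digits, which is ample for coordinates normalized to [0, 1]. A quick illustration with hypothetical values:

coords = [0.123456789, 0.5, 0.000012345]  # hypothetical normalized coordinates
print(" ".join("{:.6g}".format(c) for c in coords))
# -> 0.123457 0.5 1.2345e-05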
@@ -269,7 +268,7 @@ def split_test(data_root, save_dir, crop_size=1024, gap=200, rates=[1.0]):
     save_dir = Path(save_dir) / "images" / "test"
     save_dir.mkdir(parents=True, exist_ok=True)

-    im_dir = Path(os.path.join(data_root, "images/test"))
+    im_dir = Path(data_root) / "images" / "test"
     assert im_dir.exists(), f"Can't find {im_dir}, please check your data root."
     im_files = glob(str(im_dir / "*"))
     for im_file in tqdm(im_files, total=len(im_files), desc="test"):
@@ -281,15 +280,9 @@ def split_test(data_root, save_dir, crop_size=1024, gap=200, rates=[1.0]):
             x_start, y_start, x_stop, y_stop = window.tolist()
             new_name = f"{name}__{x_stop - x_start}__{x_start}___{y_start}"
             patch_im = im[y_start:y_stop, x_start:x_stop]
-            cv2.imwrite(os.path.join(str(save_dir), f"{new_name}.jpg"), patch_im)
+            cv2.imwrite(str(save_dir / f"{new_name}.jpg"), patch_im)


 if __name__ == "__main__":
-    split_trainval(
-        data_root="DOTAv2",
-        save_dir="DOTAv2-split",
-    )
-    split_test(
-        data_root="DOTAv2",
-        save_dir="DOTAv2-split",
-    )
+    split_trainval(data_root="DOTAv2", save_dir="DOTAv2-split")
+    split_test(data_root="DOTAv2", save_dir="DOTAv2-split")
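The patch filename packs the crop size and top-left corner into the stem as f"{name}__{x_stop - x_start}__{x_start}___{y_start}" (note the triple underscore before y_start). A small round-trip sketch; the parser is a hypothetical helper and assumes the base name itself contains no "__":

name, x_start, y_start, x_stop = "P0006", 824, 0, 1848  # hypothetical values
stem = f"{name}__{x_stop - x_start}__{x_start}___{y_start}"  # "P0006__1024__824___0"

base, size, x, y = stem.split("__")
print(base, int(size), int(x), int(y.lstrip("_")))  # P0006 1024 824 0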
@@ -198,12 +198,7 @@ class PromptEncoder(nn.Module):
         """
         return self.pe_layer(self.image_embedding_size).unsqueeze(0)

-    def _embed_points(
-        self,
-        points: torch.Tensor,
-        labels: torch.Tensor,
-        pad: bool,
-    ) -> torch.Tensor:
+    def _embed_points(self, points: torch.Tensor, labels: torch.Tensor, pad: bool) -> torch.Tensor:
         """Embeds point prompts."""
         points = points + 0.5  # Shift to center of pixel
         if pad:
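On the points + 0.5 context line: integer coordinates address a pixel's top-left corner, so the shift moves each prompt to the pixel center before positional encoding. Illustrative only, with hypothetical click locations:

import torch

pts = torch.tensor([[0.0, 0.0], [99.0, 49.0]])  # hypothetical click coordinates (x, y)
centered = pts + 0.5  # tensor([[ 0.5000,  0.5000], [99.5000, 49.5000]])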
@@ -84,12 +84,8 @@ def benchmark(
         emoji, filename = "❌", None  # export defaults
         try:
             assert i != 9 or LINUX, "Edge TPU export only supported on Linux"
-            if i == 5:
-                assert MACOS or LINUX, "CoreML export only supported on macOS and Linux"
-            elif i == 10:
-                assert MACOS or LINUX, "TF.js export only supported on macOS and Linux"
-            # elif i == 11:
-            #     assert sys.version_info < (3, 11), "PaddlePaddle export only supported on Python<=3.10"
+            if i in {5, 10}:  # CoreML and TF.js
+                assert MACOS or LINUX, "export only supported on macOS and Linux"
             if "cpu" in device.type:
                 assert cpu, "inference not supported on CPU"
             if "cuda" in device.type:
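The refactor trades two per-format error messages for one shared message plus a set-membership test; the checks themselves are identical, since both branches asserted the same condition. The pattern, reduced to a sketch with hypothetical values:

fmt = 5                      # hypothetical export-format index
MACOS, LINUX = False, True   # hypothetical platform flags
if fmt in {5, 10}:  # CoreML (5) and TF.js (10) share the same platform constraint
    assert MACOS or LINUX, "export only supported on macOS and Linux"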
@@ -105,12 +105,7 @@ def _fetch_trainer_metadata(trainer):
     save_interval = curr_epoch % save_period == 0
     save_assets = save and save_period > 0 and save_interval and not final_epoch

-    return dict(
-        curr_epoch=curr_epoch,
-        curr_step=curr_step,
-        save_assets=save_assets,
-        final_epoch=final_epoch,
-    )
+    return dict(curr_epoch=curr_epoch, curr_step=curr_step, save_assets=save_assets, final_epoch=final_epoch)


 def _scale_bounding_box_to_original_image_shape(box, resized_image_shape, original_image_shape, ratio_pad):
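The one-line return is a pure reflow; the save_assets logic above it is unchanged. A worked example of that logic with hypothetical values:

save, save_period, curr_epoch, final_epoch = True, 5, 10, False  # hypothetical trainer state
save_interval = curr_epoch % save_period == 0                                  # True: 10 is a multiple of 5
save_assets = save and save_period > 0 and save_interval and not final_epoch  # True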
@@ -218,11 +213,7 @@ def _log_confusion_matrix(experiment, trainer, curr_step, curr_epoch):
     conf_mat = trainer.validator.confusion_matrix.matrix
     names = list(trainer.data["names"].values()) + ["background"]
     experiment.log_confusion_matrix(
-        matrix=conf_mat,
-        labels=names,
-        max_categories=len(names),
-        epoch=curr_epoch,
-        step=curr_step,
+        matrix=conf_mat, labels=names, max_categories=len(names), epoch=curr_epoch, step=curr_step
     )

@@ -294,12 +285,7 @@ def _log_plots(experiment, trainer):
 def _log_model(experiment, trainer):
     """Log the best-trained model to Comet.ml."""
     model_name = _get_comet_model_name()
-    experiment.log_model(
-        model_name,
-        file_or_folder=str(trainer.best),
-        file_name="best.pt",
-        overwrite=True,
-    )
+    experiment.log_model(model_name, file_or_folder=str(trainer.best), file_name="best.pt", overwrite=True)


 def on_pretrain_routine_start(trainer):
@@ -320,11 +306,7 @@ def on_train_epoch_end(trainer):
     curr_epoch = metadata["curr_epoch"]
     curr_step = metadata["curr_step"]

-    experiment.log_metrics(
-        trainer.label_loss_items(trainer.tloss, prefix="train"),
-        step=curr_step,
-        epoch=curr_epoch,
-    )
+    experiment.log_metrics(trainer.label_loss_items(trainer.tloss, prefix="train"), step=curr_step, epoch=curr_epoch)

     if curr_epoch == 1:
         _log_images(experiment, trainer.save_dir.glob("train_batch*.jpg"), curr_step)
@@ -38,9 +38,7 @@ class VarifocalLoss(nn.Module):
 class FocalLoss(nn.Module):
     """Wraps focal loss around existing loss_fcn(), i.e. criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5)."""

-    def __init__(
-        self,
-    ):
+    def __init__(self):
         """Initializer for FocalLoss class with no parameters."""
         super().__init__()
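Note the class docstring still mentions a gamma argument while the one-line __init__ takes none; gamma is presumably supplied at call time instead. For reference, a sketch of the focal-loss modulating factor (1 - p_t)**gamma from Lin et al. (2017), which down-weights easy, well-classified examples, with hypothetical logits:

import torch

pred = torch.tensor([2.0, -2.0])   # hypothetical logits
label = torch.tensor([1.0, 0.0])   # hypothetical binary targets
gamma = 1.5
p = pred.sigmoid()
p_t = label * p + (1 - label) * (1 - p)  # probability assigned to the true class
modulating = (1.0 - p_t) ** gamma        # near 0 for confident, correct predictions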