From 7c6b1cdde800527010fef3b82dcb3772bcec00ef Mon Sep 17 00:00:00 2001
From: KorRyu3 <146335193+KorRyu3@users.noreply.github.com>
Date: Wed, 24 Jul 2024 14:44:50 +0900
Subject: [PATCH 01/15] chore: Add dependency files for GPU and CPU
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 requirements.txt => official-requirements.txt |  0
 requirements-cpu.txt                          | 94 ++++++++++++++++++
 requirements-gpu.txt                          | 95 +++++++++++++++++++
 3 files changed, 189 insertions(+)
 rename requirements.txt => official-requirements.txt (100%)
 create mode 100644 requirements-cpu.txt
 create mode 100644 requirements-gpu.txt

diff --git a/requirements.txt b/official-requirements.txt
similarity index 100%
rename from requirements.txt
rename to official-requirements.txt
diff --git a/requirements-cpu.txt b/requirements-cpu.txt
new file mode 100644
index 00000000..207ec31d
--- /dev/null
+++ b/requirements-cpu.txt
@@ -0,0 +1,94 @@
+aiofiles==23.2.1
+altair==5.3.0
+annotated-types==0.7.0
+anyio==4.4.0
+attrs==23.2.0
+certifi==2024.7.4
+charset-normalizer==3.3.2
+click==8.1.7
+colorama==0.4.6
+coloredlogs==15.0.1
+contourpy==1.2.1
+cycler==0.12.1
+dnspython==2.6.1
+email_validator==2.2.0
+exceptiongroup==1.2.2
+fastapi==0.111.1
+fastapi-cli==0.0.4
+ffmpy==0.3.2
+filelock==3.15.4
+flatbuffers==24.3.25
+fonttools==4.53.1
+fsspec==2024.6.1
+gradio==4.31.5
+gradio_client==0.16.4
+h11==0.14.0
+httpcore==1.0.5
+httptools==0.6.1
+httpx==0.27.0
+huggingface-hub==0.23.2
+humanfriendly==10.0
+idna==3.7
+importlib_resources==6.4.0
+Jinja2==3.1.4
+jsonschema==4.23.0
+jsonschema-specifications==2023.12.1
+kiwisolver==1.4.5
+markdown-it-py==3.0.0
+MarkupSafe==2.1.5
+matplotlib==3.9.1
+mdurl==0.1.2
+mpmath==1.3.0
+networkx==3.3
+numpy==1.26.4
+onnx==1.14.0
+onnxruntime==1.15.1
+onnxruntime-gpu==1.18.0
+onnxsim==0.4.36
+opencv-python==4.9.0.80
+orjson==3.10.6
+packaging==24.1
+pandas==2.2.2
+pillow==10.4.0
+protobuf==5.27.2
+psutil==5.9.8
+py-cpuinfo==9.0.0
+pycocotools==2.0.7
+pydantic==2.8.2
+pydantic_core==2.20.1
+pydub==0.25.1
+Pygments==2.18.0
+pyparsing==3.1.2
+pyreadline3==3.4.1
+python-dateutil==2.9.0.post0
+python-dotenv==1.0.1
+python-multipart==0.0.9
+pytz==2024.1
+PyYAML==6.0.1
+referencing==0.35.1
+requests==2.32.3
+rich==13.7.1
+rpds-py==0.19.0
+ruff==0.5.4
+safetensors==0.4.3
+scipy==1.13.0
+seaborn==0.13.2
+semantic-version==2.10.0
+shellingham==1.5.4
+six==1.16.0
+sniffio==1.3.1
+starlette==0.37.2
+sympy==1.13.1
+thop==0.1.1
+tomlkit==0.12.0
+toolz==0.12.1
+torch==2.0.1
+torchvision==0.15.2
+tqdm==4.66.4
+typer==0.12.3
+typing_extensions==4.12.2
+tzdata==2024.1
+urllib3==2.2.2
+uvicorn==0.30.3
+watchfiles==0.22.0
+websockets==11.0.3
diff --git a/requirements-gpu.txt b/requirements-gpu.txt
new file mode 100644
index 00000000..b049c5e5
--- /dev/null
+++ b/requirements-gpu.txt
@@ -0,0 +1,95 @@
+aiofiles==23.2.1
+altair==5.3.0
+annotated-types==0.7.0
+anyio==4.4.0
+attrs==23.2.0
+certifi==2024.7.4
+charset-normalizer==3.3.2
+click==8.1.7
+colorama==0.4.6
+coloredlogs==15.0.1
+contourpy==1.2.1
+cycler==0.12.1
+dnspython==2.6.1
+email_validator==2.2.0
+exceptiongroup==1.2.2
+fastapi==0.111.1
+fastapi-cli==0.0.4
+ffmpy==0.3.2
+filelock==3.15.4
+flatbuffers==24.3.25
+fonttools==4.53.1
+fsspec==2024.6.1
+gradio==4.31.5
+gradio_client==0.16.4
+h11==0.14.0
+httpcore==1.0.5
+httptools==0.6.1
+httpx==0.27.0
+huggingface-hub==0.23.2
+humanfriendly==10.0
+idna==3.7
+importlib_resources==6.4.0
+Jinja2==3.1.4
+jsonschema==4.23.0
+jsonschema-specifications==2023.12.1
+kiwisolver==1.4.5
+markdown-it-py==3.0.0
+MarkupSafe==2.1.5
+matplotlib==3.9.1
+mdurl==0.1.2
+mpmath==1.3.0
+networkx==3.3
+numpy==1.26.4
+onnx==1.14.0
+onnxruntime-gpu==1.18.0
+onnxsim==0.4.36
+opencv-python==4.9.0.80
+orjson==3.10.6
+packaging==24.1
+pandas==2.2.2
+pillow==10.4.0
+protobuf==5.27.2
+psutil==5.9.8
+py-cpuinfo==9.0.0
+pycocotools==2.0.7
+pydantic==2.8.2
+pydantic_core==2.20.1
+pydub==0.25.1
+Pygments==2.18.0
+pyparsing==3.1.2
+pyreadline3==3.4.1
+python-dateutil==2.9.0.post0
+python-dotenv==1.0.1
+python-multipart==0.0.9
+pytz==2024.1
+PyYAML==6.0.1
+referencing==0.35.1
+requests==2.32.3
+rich==13.7.1
+rpds-py==0.19.0
+ruff==0.5.4
+safetensors==0.4.3
+scipy==1.13.0
+seaborn==0.13.2
+semantic-version==2.10.0
+shellingham==1.5.4
+six==1.16.0
+sniffio==1.3.1
+starlette==0.37.2
+sympy==1.13.1
+thop==0.1.1
+tomlkit==0.12.0
+toolz==0.12.1
+tqdm==4.66.4
+typer==0.12.3
+typing_extensions==4.12.2
+tzdata==2024.1
+urllib3==2.2.2
+uvicorn==0.30.3
+watchfiles==0.22.0
+websockets==11.0.3
+
+--extra-index-url https://download.pytorch.org/whl/cu118
+torch==2.0.1+cu118
+torchvision==0.15.2+cu118
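
As a quick sanity check for the CUDA 11.8 pins above, a minimal sketch (assuming `requirements-gpu.txt` was installed on a machine with an NVIDIA driver):

```python
# Verify that the +cu118 build of torch from the extra index was installed.
import torch

print(torch.__version__)          # expected: 2.0.1+cu118
print(torch.cuda.is_available())  # True only with a working NVIDIA driver
```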

From 2be63bd1c557a0443e2aad9e97a51f340350494e Mon Sep 17 00:00:00 2001
From: KorRyu3 <146335193+KorRyu3@users.noreply.github.com>
Date: Wed, 24 Jul 2024 15:06:58 +0900
Subject: [PATCH 02/15] chore: Fix accidentally deleted parts of the dependencies
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 requirements-cpu.txt | 2 +-
 requirements-gpu.txt | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/requirements-cpu.txt b/requirements-cpu.txt
index 207ec31d..96670be4 100644
--- a/requirements-cpu.txt
+++ b/requirements-cpu.txt
@@ -79,7 +79,7 @@ six==1.16.0
 sniffio==1.3.1
 starlette==0.37.2
 sympy==1.13.1
-thop==0.1.1
+thop==0.1.1.post2209072238
 tomlkit==0.12.0
 toolz==0.12.1
 torch==2.0.1
diff --git a/requirements-gpu.txt b/requirements-gpu.txt
index b049c5e5..e2a531cf 100644
--- a/requirements-gpu.txt
+++ b/requirements-gpu.txt
@@ -78,7 +78,7 @@ six==1.16.0
 sniffio==1.3.1
 starlette==0.37.2
 sympy==1.13.1
-thop==0.1.1
+thop==0.1.1.post2209072238
 tomlkit==0.12.0
 toolz==0.12.1
 tqdm==4.66.4

From 7a2de1541b11f21744a21ac3c048b3d0be871dc6 Mon Sep 17 00:00:00 2001
From: KorRyu3 <146335193+KorRyu3@users.noreply.github.com>
Date: Wed, 31 Jul 2024 22:08:25 +0900
Subject: [PATCH 03/15] chore: Update dependencies
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 official-requirements.txt |  4 ++--
 requirements-cpu.txt      | 10 ++++------
 requirements-gpu.txt      |  4 +---
 3 files changed, 7 insertions(+), 11 deletions(-)

diff --git a/official-requirements.txt b/official-requirements.txt
index 0464ed55..9c294012 100644
--- a/official-requirements.txt
+++ b/official-requirements.txt
@@ -5,11 +5,11 @@ onnxruntime==1.15.1
 pycocotools==2.0.7
 PyYAML==6.0.1
 scipy==1.13.0
-onnxsim==0.4.36
+onnxslim==0.1.31
 onnxruntime-gpu==1.18.0
 gradio==4.31.5
 opencv-python==4.9.0.80
 psutil==5.9.8
 py-cpuinfo==9.0.0
 huggingface-hub==0.23.2
-safetensors==0.4.3
\ No newline at end of file
+safetensors==0.4.3
diff --git a/requirements-cpu.txt b/requirements-cpu.txt
index 96670be4..2e61b509 100644
--- a/requirements-cpu.txt
+++ b/requirements-cpu.txt
@@ -15,7 +15,7 @@ email_validator==2.2.0
 exceptiongroup==1.2.2
 fastapi==0.111.1
 fastapi-cli==0.0.4
-ffmpy==0.3.2
+ffmpy==0.4.0
 filelock==3.15.4
 flatbuffers==24.3.25
 fonttools==4.53.1
@@ -44,7 +44,7 @@ numpy==1.26.4
 onnx==1.14.0
 onnxruntime==1.15.1
 onnxruntime-gpu==1.18.0
-onnxsim==0.4.36
+onnxslim==0.1.31
 opencv-python==4.9.0.80
 orjson==3.10.6
 packaging==24.1
@@ -68,18 +68,16 @@ PyYAML==6.0.1
 referencing==0.35.1
 requests==2.32.3
 rich==13.7.1
-rpds-py==0.19.0
-ruff==0.5.4
+rpds-py==0.19.1
+ruff==0.5.5
 safetensors==0.4.3
 scipy==1.13.0
-seaborn==0.13.2
 semantic-version==2.10.0
 shellingham==1.5.4
 six==1.16.0
 sniffio==1.3.1
 starlette==0.37.2
 sympy==1.13.1
-thop==0.1.1.post2209072238
 tomlkit==0.12.0
 toolz==0.12.1
 torch==2.0.1
diff --git a/requirements-gpu.txt b/requirements-gpu.txt
index e2a531cf..fff9befd 100644
--- a/requirements-gpu.txt
+++ b/requirements-gpu.txt
@@ -43,7 +43,7 @@ networkx==3.3
 numpy==1.26.4
 onnx==1.14.0
 onnxruntime-gpu==1.18.0
-onnxsim==0.4.36
+onnxslim==0.1.31
 opencv-python==4.9.0.80
 orjson==3.10.6
 packaging==24.1
@@ -71,14 +71,12 @@ rpds-py==0.19.0
 ruff==0.5.4
 safetensors==0.4.3
 scipy==1.13.0
-seaborn==0.13.2
 semantic-version==2.10.0
 shellingham==1.5.4
 six==1.16.0
 sniffio==1.3.1
 starlette==0.37.2
 sympy==1.13.1
-thop==0.1.1.post2209072238
 tomlkit==0.12.0
 toolz==0.12.1
 tqdm==4.66.4

From 864e25d5760813989e2f1eb4caa8790db5b1c208 Mon Sep 17 00:00:00 2001
From: KorRyu3 <146335193+KorRyu3@users.noreply.github.com>
Date: Wed, 31 Jul 2024 23:21:52 +0900
Subject: [PATCH 04/15] chore: Build dependencies for Python 3.9
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 requirements-cpu.txt | 5 ++++-
 requirements-gpu.txt | 5 ++++-
 2 files changed, 8 insertions(+), 2 deletions(-)

diff --git a/requirements-cpu.txt b/requirements-cpu.txt
index 2e61b509..0e82a233 100644
--- a/requirements-cpu.txt
+++ b/requirements-cpu.txt
@@ -39,7 +39,7 @@ MarkupSafe==2.1.5
 matplotlib==3.9.1
 mdurl==0.1.2
 mpmath==1.3.0
-networkx==3.3
+networkx==3.2.1
 numpy==1.26.4
 onnx==1.14.0
 onnxruntime==1.15.1
@@ -72,12 +72,14 @@ rpds-py==0.19.1
 ruff==0.5.5
 safetensors==0.4.3
 scipy==1.13.0
+seaborn==0.13.2
 semantic-version==2.10.0
 shellingham==1.5.4
 six==1.16.0
 sniffio==1.3.1
 starlette==0.37.2
 sympy==1.13.1
+thop==0.1.1.post2209072238
 tomlkit==0.12.0
 toolz==0.12.1
 torch==2.0.1
@@ -90,3 +92,4 @@ urllib3==2.2.2
 uvicorn==0.30.3
 watchfiles==0.22.0
 websockets==11.0.3
+zipp==3.19.2
diff --git a/requirements-gpu.txt b/requirements-gpu.txt
index fff9befd..c22dda71 100644
--- a/requirements-gpu.txt
+++ b/requirements-gpu.txt
@@ -39,7 +39,7 @@ MarkupSafe==2.1.5
 matplotlib==3.9.1
 mdurl==0.1.2
 mpmath==1.3.0
-networkx==3.3
+networkx==3.2.1
 numpy==1.26.4
 onnx==1.14.0
 onnxruntime-gpu==1.18.0
@@ -71,12 +71,14 @@ rpds-py==0.19.0
 ruff==0.5.4
 safetensors==0.4.3
 scipy==1.13.0
+seaborn==0.13.2
 semantic-version==2.10.0
 shellingham==1.5.4
 six==1.16.0
 sniffio==1.3.1
 starlette==0.37.2
 sympy==1.13.1
+thop==0.1.1.post2209072238
 tomlkit==0.12.0
 toolz==0.12.1
 tqdm==4.66.4
@@ -87,6 +89,7 @@ urllib3==2.2.2
 uvicorn==0.30.3
 watchfiles==0.22.0
 websockets==11.0.3
+zipp==3.19.2
 
 --extra-index-url https://download.pytorch.org/whl/cu118
 torch==2.0.1+cu118

From 21aa2470023e01dcd5bdac581e3d7819b57a8097 Mon Sep 17 00:00:00 2001
From: KorRyu3 <146335193+KorRyu3@users.noreply.github.com>
Date: Thu, 1 Aug 2024 01:23:12 +0900
Subject: [PATCH 05/15] chore: Remove unnecessary ignore entries
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 .gitignore | 7 +------
 1 file changed, 1 insertion(+), 6 deletions(-)

diff --git a/.gitignore b/.gitignore
index 0854267a..f99e12a8 100644
--- a/.gitignore
+++ b/.gitignore
@@ -85,9 +85,6 @@ ipython_config.py
 # Profiling
 *.pclprof
 
-# pyenv
-.python-version
-
 # pipenv
 #   According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
 #   However, in case of collaboration, if having platform-specific dependencies or dependencies
@@ -138,14 +135,12 @@ dmypy.json
 .pyre/
 
 # datasets and projects
-datasets/
-runs/
 wandb/
 tests/
 .DS_Store
 
 # Neural Network weights -----------------------------------------------------------------------------------------------
-weights/
+weights/*
 *.weights
 *.pt
 *.pb

From b1289994c15b20640fa733381007a1db810eb85b Mon Sep 17 00:00:00 2001
From: KorRyu3 <146335193+KorRyu3@users.noreply.github.com>
Date: Thu, 1 Aug 2024 01:23:45 +0900
Subject: [PATCH 06/15] chore: Unify the Python version at 3.9.13
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 .python-version | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 .python-version

diff --git a/.python-version b/.python-version
new file mode 100644
index 00000000..7c76bce0
--- /dev/null
+++ b/.python-version
@@ -0,0 +1 @@
+3.9.13

From 028840af4b64d74f715b56ea013c285013ab311f Mon Sep 17 00:00:00 2001
From: KorRyu3 <146335193+KorRyu3@users.noreply.github.com>
Date: Thu, 1 Aug 2024 01:24:14 +0900
Subject: [PATCH 07/15] feat: Add config files for model hyperparameters
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 cfg/pineapple.yaml | 127 +++++++++++++++++++++++++++++++++++++++++++++
 cfg/sugarcane.yaml | 127 +++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 254 insertions(+)
 create mode 100644 cfg/pineapple.yaml
 create mode 100644 cfg/sugarcane.yaml

diff --git a/cfg/pineapple.yaml b/cfg/pineapple.yaml
new file mode 100644
index 00000000..bc64897e
--- /dev/null
+++ b/cfg/pineapple.yaml
@@ -0,0 +1,127 @@
+# Ultralytics YOLO 🚀, AGPL-3.0 license
+# Default training settings and hyperparameters for medium-augmentation COCO training
+
+task: detect # (str) YOLO task, i.e. detect, segment, classify, pose
+mode: train # (str) YOLO mode, i.e. train, val, predict, export, track, benchmark
+
+# Train settings -------------------------------------------------------------------------------------------------------
+model: # (str, optional) path to model file, i.e. yolov8n.pt, yolov8n.yaml
+data: # (str, optional) path to data file, i.e. coco128.yaml
+epochs: 100 # (int) number of epochs to train for
+time: # (float, optional) number of hours to train for, overrides epochs if supplied
+patience: 100 # (int) epochs to wait for no observable improvement for early stopping of training
+batch: 16 # (int) number of images per batch (-1 for AutoBatch)
+imgsz: 640 # (int | list) input images size as int for train and val modes, or list[w,h] for predict and export modes
+save: True # (bool) save train checkpoints and predict results
+save_period: -1 # (int) Save checkpoint every x epochs (disabled if < 1)
+val_period: 1 # (int) Validation every x epochs
+cache: False # (bool) True/ram, disk or False. Use cache for data loading
+device: # (int | str | list, optional) device to run on, i.e. cuda device=0 or device=0,1,2,3 or device=cpu
+workers: 8 # (int) number of worker threads for data loading (per RANK if DDP)
+project: # (str, optional) project name
+name: # (str, optional) experiment name, results saved to 'project/name' directory
+exist_ok: False # (bool) whether to overwrite existing experiment
+pretrained: True # (bool | str) whether to use a pretrained model (bool) or a model to load weights from (str)
+optimizer: auto # (str) optimizer to use, choices=[SGD, Adam, Adamax, AdamW, NAdam, RAdam, RMSProp, auto]
+verbose: True # (bool) whether to print verbose output
+seed: 0 # (int) random seed for reproducibility
+deterministic: True # (bool) whether to enable deterministic mode
+single_cls: False # (bool) train multi-class data as single-class
+rect: False # (bool) rectangular training if mode='train' or rectangular validation if mode='val'
+cos_lr: False # (bool) use cosine learning rate scheduler
+close_mosaic: 10 # (int) disable mosaic augmentation for final epochs (0 to disable)
+resume: False # (bool) resume training from last checkpoint
+amp: True # (bool) Automatic Mixed Precision (AMP) training, choices=[True, False], True runs AMP check
+fraction: 1.0 # (float) dataset fraction to train on (default is 1.0, all images in train set)
+profile: False # (bool) profile ONNX and TensorRT speeds during training for loggers
+freeze: None # (int | list, optional) freeze first n layers, or freeze list of layer indices during training
+multi_scale: False # (bool) Whether to use multiscale during training
+# Segmentation
+overlap_mask: True # (bool) masks should overlap during training (segment train only)
+mask_ratio: 4 # (int) mask downsample ratio (segment train only)
+# Classification
+dropout: 0.0 # (float) use dropout regularization (classify train only)
+
+# Val/Test settings ----------------------------------------------------------------------------------------------------
+val: True # (bool) validate/test during training
+split: val # (str) dataset split to use for validation, i.e. 'val', 'test' or 'train'
+save_json: False # (bool) save results to JSON file
+save_hybrid: False # (bool) save hybrid version of labels (labels + additional predictions)
+conf: # (float, optional) object confidence threshold for detection (default 0.25 predict, 0.001 val)
+iou: 0.7 # (float) intersection over union (IoU) threshold for NMS
+max_det: 300 # (int) maximum number of detections per image
+half: False # (bool) use half precision (FP16)
+dnn: False # (bool) use OpenCV DNN for ONNX inference
+plots: True # (bool) save plots and images during train/val
+
+# Predict settings -----------------------------------------------------------------------------------------------------
+source: # (str, optional) source directory for images or videos
+vid_stride: 1 # (int) video frame-rate stride
+stream_buffer: False # (bool) buffer all streaming frames (True) or return the most recent frame (False)
+visualize: False # (bool) visualize model features
+augment: False # (bool) apply image augmentation to prediction sources
+agnostic_nms: False # (bool) class-agnostic NMS
+classes: # (int | list[int], optional) filter results by class, i.e. classes=0, or classes=[0,2,3]
+retina_masks: False # (bool) use high-resolution segmentation masks
+embed: # (list[int], optional) return feature vectors/embeddings from given layers
+
+# Visualize settings ---------------------------------------------------------------------------------------------------
+show: False # (bool) show predicted images and videos if environment allows
+save_frames: False # (bool) save predicted individual video frames
+save_txt: False # (bool) save results as .txt file
+save_conf: False # (bool) save results with confidence scores
+save_crop: False # (bool) save cropped images with results
+show_labels: True # (bool) show prediction labels, i.e. 'person'
+show_conf: True # (bool) show prediction confidence, i.e. '0.99'
+show_boxes: True # (bool) show prediction boxes
+line_width: # (int, optional) line width of the bounding boxes. Scaled to image size if None.
+
+# Export settings ------------------------------------------------------------------------------------------------------
+format: torchscript # (str) format to export to, choices at https://docs.ultralytics.com/modes/export/#export-formats
+keras: False # (bool) use Keras
+optimize: False # (bool) TorchScript: optimize for mobile
+int8: False # (bool) CoreML/TF INT8 quantization
+dynamic: False # (bool) ONNX/TF/TensorRT: dynamic axes
+simplify: False # (bool) ONNX: simplify model
+opset: # (int, optional) ONNX: opset version
+workspace: 4 # (int) TensorRT: workspace size (GB)
+nms: False # (bool) CoreML: add NMS
+
+# Hyperparameters ------------------------------------------------------------------------------------------------------
+lr0: 0.01 # (float) initial learning rate (i.e. SGD=1E-2, Adam=1E-3)
+lrf: 0.01 # (float) final learning rate (lr0 * lrf)
+momentum: 0.937 # (float) SGD momentum/Adam beta1
+weight_decay: 0.0005 # (float) optimizer weight decay 5e-4
+warmup_epochs: 3.0 # (float) warmup epochs (fractions ok)
+warmup_momentum: 0.8 # (float) warmup initial momentum
+warmup_bias_lr: 0.1 # (float) warmup initial bias lr
+box: 7.5 # (float) box loss gain
+cls: 0.5 # (float) cls loss gain (scale with pixels)
+dfl: 1.5 # (float) dfl loss gain
+pose: 12.0 # (float) pose loss gain
+kobj: 1.0 # (float) keypoint obj loss gain
+label_smoothing: 0.0 # (float) label smoothing (fraction)
+nbs: 64 # (int) nominal batch size
+hsv_h: 0.015 # (float) image HSV-Hue augmentation (fraction)
+hsv_s: 0.7 # (float) image HSV-Saturation augmentation (fraction)
+hsv_v: 0.4 # (float) image HSV-Value augmentation (fraction)
+degrees: 0.0 # (float) image rotation (+/- deg)
+translate: 0.1 # (float) image translation (+/- fraction)
+scale: 0.5 # (float) image scale (+/- gain)
+shear: 0.0 # (float) image shear (+/- deg)
+perspective: 0.0 # (float) image perspective (+/- fraction), range 0-0.001
+flipud: 0.0 # (float) image flip up-down (probability)
+fliplr: 0.5 # (float) image flip left-right (probability)
+bgr: 0.0 # (float) image channel BGR (probability)
+mosaic: 1.0 # (float) image mosaic (probability)
+mixup: 0.0 # (float) image mixup (probability)
+copy_paste: 0.0 # (float) segment copy-paste (probability)
+auto_augment: randaugment # (str) auto augmentation policy for classification (randaugment, autoaugment, augmix)
+erasing: 0.4 # (float) probability of random erasing during classification training (0-1)
+crop_fraction: 1.0 # (float) image crop fraction for classification evaluation/inference (0-1)
+
+# Custom config.yaml ---------------------------------------------------------------------------------------------------
+cfg: # (str, optional) for overriding defaults.yaml
+
+# Tracker settings ------------------------------------------------------------------------------------------------------
+tracker: botsort.yaml # (str) tracker type, choices=[botsort.yaml, bytetrack.yaml]
diff --git a/cfg/sugarcane.yaml b/cfg/sugarcane.yaml
new file mode 100644
index 00000000..bc64897e
--- /dev/null
+++ b/cfg/sugarcane.yaml
@@ -0,0 +1,127 @@
+# Ultralytics YOLO 🚀, AGPL-3.0 license
+# Default training settings and hyperparameters for medium-augmentation COCO training
+
+task: detect # (str) YOLO task, i.e. detect, segment, classify, pose
+mode: train # (str) YOLO mode, i.e. train, val, predict, export, track, benchmark
+
+# Train settings -------------------------------------------------------------------------------------------------------
+model: # (str, optional) path to model file, i.e. yolov8n.pt, yolov8n.yaml
+data: # (str, optional) path to data file, i.e. coco128.yaml
+epochs: 100 # (int) number of epochs to train for
+time: # (float, optional) number of hours to train for, overrides epochs if supplied
+patience: 100 # (int) epochs to wait for no observable improvement for early stopping of training
+batch: 16 # (int) number of images per batch (-1 for AutoBatch)
+imgsz: 640 # (int | list) input images size as int for train and val modes, or list[w,h] for predict and export modes
+save: True # (bool) save train checkpoints and predict results
+save_period: -1 # (int) Save checkpoint every x epochs (disabled if < 1)
+val_period: 1 # (int) Validation every x epochs
+cache: False # (bool) True/ram, disk or False. Use cache for data loading
+device: # (int | str | list, optional) device to run on, i.e. cuda device=0 or device=0,1,2,3 or device=cpu
+workers: 8 # (int) number of worker threads for data loading (per RANK if DDP)
+project: # (str, optional) project name
+name: # (str, optional) experiment name, results saved to 'project/name' directory
+exist_ok: False # (bool) whether to overwrite existing experiment
+pretrained: True # (bool | str) whether to use a pretrained model (bool) or a model to load weights from (str)
+optimizer: auto # (str) optimizer to use, choices=[SGD, Adam, Adamax, AdamW, NAdam, RAdam, RMSProp, auto]
+verbose: True # (bool) whether to print verbose output
+seed: 0 # (int) random seed for reproducibility
+deterministic: True # (bool) whether to enable deterministic mode
+single_cls: False # (bool) train multi-class data as single-class
+rect: False # (bool) rectangular training if mode='train' or rectangular validation if mode='val'
+cos_lr: False # (bool) use cosine learning rate scheduler
+close_mosaic: 10 # (int) disable mosaic augmentation for final epochs (0 to disable)
+resume: False # (bool) resume training from last checkpoint
+amp: True # (bool) Automatic Mixed Precision (AMP) training, choices=[True, False], True runs AMP check
+fraction: 1.0 # (float) dataset fraction to train on (default is 1.0, all images in train set)
+profile: False # (bool) profile ONNX and TensorRT speeds during training for loggers
+freeze: None # (int | list, optional) freeze first n layers, or freeze list of layer indices during training
+multi_scale: False # (bool) Whether to use multiscale during training
+# Segmentation
+overlap_mask: True # (bool) masks should overlap during training (segment train only)
+mask_ratio: 4 # (int) mask downsample ratio (segment train only)
+# Classification
+dropout: 0.0 # (float) use dropout regularization (classify train only)
+
+# Val/Test settings ----------------------------------------------------------------------------------------------------
+val: True # (bool) validate/test during training
+split: val # (str) dataset split to use for validation, i.e. 'val', 'test' or 'train'
+save_json: False # (bool) save results to JSON file
+save_hybrid: False # (bool) save hybrid version of labels (labels + additional predictions)
+conf: # (float, optional) object confidence threshold for detection (default 0.25 predict, 0.001 val)
+iou: 0.7 # (float) intersection over union (IoU) threshold for NMS
+max_det: 300 # (int) maximum number of detections per image
+half: False # (bool) use half precision (FP16)
+dnn: False # (bool) use OpenCV DNN for ONNX inference
+plots: True # (bool) save plots and images during train/val
+
+# Predict settings -----------------------------------------------------------------------------------------------------
+source: # (str, optional) source directory for images or videos
+vid_stride: 1 # (int) video frame-rate stride
+stream_buffer: False # (bool) buffer all streaming frames (True) or return the most recent frame (False)
+visualize: False # (bool) visualize model features
+augment: False # (bool) apply image augmentation to prediction sources
+agnostic_nms: False # (bool) class-agnostic NMS
+classes: # (int | list[int], optional) filter results by class, i.e. classes=0, or classes=[0,2,3]
+retina_masks: False # (bool) use high-resolution segmentation masks
+embed: # (list[int], optional) return feature vectors/embeddings from given layers
+
+# Visualize settings ---------------------------------------------------------------------------------------------------
+show: False # (bool) show predicted images and videos if environment allows
+save_frames: False # (bool) save predicted individual video frames
+save_txt: False # (bool) save results as .txt file
+save_conf: False # (bool) save results with confidence scores
+save_crop: False # (bool) save cropped images with results
+show_labels: True # (bool) show prediction labels, i.e. 'person'
+show_conf: True # (bool) show prediction confidence, i.e. '0.99'
+show_boxes: True # (bool) show prediction boxes
+line_width: # (int, optional) line width of the bounding boxes. Scaled to image size if None.
+
+# Export settings ------------------------------------------------------------------------------------------------------
+format: torchscript # (str) format to export to, choices at https://docs.ultralytics.com/modes/export/#export-formats
+keras: False # (bool) use Keras
+optimize: False # (bool) TorchScript: optimize for mobile
+int8: False # (bool) CoreML/TF INT8 quantization
+dynamic: False # (bool) ONNX/TF/TensorRT: dynamic axes
+simplify: False # (bool) ONNX: simplify model
+opset: # (int, optional) ONNX: opset version
+workspace: 4 # (int) TensorRT: workspace size (GB)
+nms: False # (bool) CoreML: add NMS
+
+# Hyperparameters ------------------------------------------------------------------------------------------------------
+lr0: 0.01 # (float) initial learning rate (i.e. SGD=1E-2, Adam=1E-3)
+lrf: 0.01 # (float) final learning rate (lr0 * lrf)
+momentum: 0.937 # (float) SGD momentum/Adam beta1
+weight_decay: 0.0005 # (float) optimizer weight decay 5e-4
+warmup_epochs: 3.0 # (float) warmup epochs (fractions ok)
+warmup_momentum: 0.8 # (float) warmup initial momentum
+warmup_bias_lr: 0.1 # (float) warmup initial bias lr
+box: 7.5 # (float) box loss gain
+cls: 0.5 # (float) cls loss gain (scale with pixels)
+dfl: 1.5 # (float) dfl loss gain
+pose: 12.0 # (float) pose loss gain
+kobj: 1.0 # (float) keypoint obj loss gain
+label_smoothing: 0.0 # (float) label smoothing (fraction)
+nbs: 64 # (int) nominal batch size
+hsv_h: 0.015 # (float) image HSV-Hue augmentation (fraction)
+hsv_s: 0.7 # (float) image HSV-Saturation augmentation (fraction)
+hsv_v: 0.4 # (float) image HSV-Value augmentation (fraction)
+degrees: 0.0 # (float) image rotation (+/- deg)
+translate: 0.1 # (float) image translation (+/- fraction)
+scale: 0.5 # (float) image scale (+/- gain)
+shear: 0.0 # (float) image shear (+/- deg)
+perspective: 0.0 # (float) image perspective (+/- fraction), range 0-0.001
+flipud: 0.0 # (float) image flip up-down (probability)
+fliplr: 0.5 # (float) image flip left-right (probability)
+bgr: 0.0 # (float) image channel BGR (probability)
+mosaic: 1.0 # (float) image mosaic (probability)
+mixup: 0.0 # (float) image mixup (probability)
+copy_paste: 0.0 # (float) segment copy-paste (probability)
+auto_augment: randaugment # (str) auto augmentation policy for classification (randaugment, autoaugment, augmix)
+erasing: 0.4 # (float) probability of random erasing during classification training (0-1)
+crop_fraction: 1.0 # (float) image crop fraction for classification evaluation/inference (0-1)
+
+# Custom config.yaml ---------------------------------------------------------------------------------------------------
+cfg: # (str, optional) for overriding defaults.yaml
+
+# Tracker settings ------------------------------------------------------------------------------------------------------
+tracker: botsort.yaml # (str) tracker type, choices=[botsort.yaml, bytetrack.yaml]
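
For reference, a minimal sketch of launching training with one of these cfg files from Python instead of the CLI; the `YOLOv10` class and the `weights/yolov10x.pt` path are assumptions based on the README later in this series:

```python
from ultralytics import YOLOv10

# The cfg file above overrides Ultralytics' default.yaml at train time.
model = YOLOv10('weights/yolov10x.pt')
model.train(
    cfg='cfg/sugarcane.yaml',             # hyperparameters from this patch
    data='datasets/sugarcane/data.yaml',  # dataset definition (added in a later patch)
    name='yolov10x-sugarcane',
    epochs=300,
    batch=16,
    imgsz=640,
    device=0,
)
```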

From f7ec0298b628a390adbc926886f65fd11f569c33 Mon Sep 17 00:00:00 2001
From: KorRyu3 <146335193+KorRyu3@users.noreply.github.com>
Date: Thu, 1 Aug 2024 01:24:40 +0900
Subject: [PATCH 08/15] feat: Add datasets-related files
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 datasets/.gitignore                    |  8 ++++
 datasets/README.md                     | 56 ++++++++++++++++++++++++++
 datasets/pineapple/README.dataset.txt  |  6 +++
 datasets/pineapple/README.roboflow.txt | 27 +++++++++++++
 datasets/pineapple/data.yaml           | 14 +++++++
 datasets/sugarcane/README.dataset.txt  |  6 +++
 datasets/sugarcane/README.roboflow.txt | 27 +++++++++++++
 datasets/sugarcane/data.yaml           | 14 +++++++
 8 files changed, 158 insertions(+)
 create mode 100644 datasets/.gitignore
 create mode 100644 datasets/README.md
 create mode 100644 datasets/pineapple/README.dataset.txt
 create mode 100644 datasets/pineapple/README.roboflow.txt
 create mode 100644 datasets/pineapple/data.yaml
 create mode 100644 datasets/sugarcane/README.dataset.txt
 create mode 100644 datasets/sugarcane/README.roboflow.txt
 create mode 100644 datasets/sugarcane/data.yaml

diff --git a/datasets/.gitignore b/datasets/.gitignore
new file mode 100644
index 00000000..116719a0
--- /dev/null
+++ b/datasets/.gitignore
@@ -0,0 +1,8 @@
+# Exclude all files
+*/*
+
+# Exceptions
+!.gitignore
+!README.md
+!**/data.yaml
+!**/README*.txt
diff --git a/datasets/README.md b/datasets/README.md
new file mode 100644
index 00000000..ad9705f2
--- /dev/null
+++ b/datasets/README.md
@@ -0,0 +1,56 @@
+# Custom datasets
+
+The datasets are built with a service called Roboflow.
+
+Download the datasets used for training and evaluation from
+
+- [Sugarcane](https://universe.roboflow.com/hoku/sugarcane-3vhxz/dataset/11)
+- [Pineapple](https://universe.roboflow.com/hoku/pineapple-thsih/dataset/7)
+
+in `YOLO v8` format, and place `train`, `valid`, and `test` in their respective directories, following the layout below.
+
+## Directory structure
+
+```plaintext
+datasets/
+    .gitignore
+    README.md
+    sugarcane/
+        data.yaml
+        README.dataset.txt
+        README.roboflow.txt
+        train/
+            images/
+                ...
+            labels/
+                ...
+        valid/
+            images/
+                ...
+            labels/
+                ...
+        test/
+            images/
+                ...
+            labels/
+                ...
+    pineapple/
+        data.yaml
+        README.dataset.txt
+        README.roboflow.txt
+        train/
+            images/
+                ...
+            labels/
+                ...
+        valid/
+            images/
+                ...
+            labels/
+                ...
+        test/
+            images/
+                ...
+            labels/
+                ...
+```
diff --git a/datasets/pineapple/README.dataset.txt b/datasets/pineapple/README.dataset.txt
new file mode 100644
index 00000000..39eccf2d
--- /dev/null
+++ b/datasets/pineapple/README.dataset.txt
@@ -0,0 +1,6 @@
+# Pineapple > 2023-07-19 9:57am
+https://universe.roboflow.com/hoku/pineapple-thsih
+
+Provided by a Roboflow user
+License: MIT
+
diff --git a/datasets/pineapple/README.roboflow.txt b/datasets/pineapple/README.roboflow.txt
new file mode 100644
index 00000000..1e88fe1d
--- /dev/null
+++ b/datasets/pineapple/README.roboflow.txt
@@ -0,0 +1,27 @@
+
+Pineapple - v7 2023-07-19 9:57am
+==============================
+
+This dataset was exported via roboflow.com on July 31, 2024 at 3:58 PM GMT
+
+Roboflow is an end-to-end computer vision platform that helps you
+* collaborate with your team on computer vision projects
+* collect & organize images
+* understand and search unstructured image data
+* annotate, and create datasets
+* export, train, and deploy computer vision models
+* use active learning to improve your dataset over time
+
+For state of the art Computer Vision training notebooks you can use with this dataset,
+visit https://github.com/roboflow/notebooks
+
+To find over 100k other datasets and pre-trained models, visit https://universe.roboflow.com
+
+The dataset includes 619 images.
+Pineapple are annotated in YOLOv8 format.
+
+The following pre-processing was applied to each image:
+
+No image augmentation techniques were applied.
+
+
diff --git a/datasets/pineapple/data.yaml b/datasets/pineapple/data.yaml
new file mode 100644
index 00000000..9860cb24
--- /dev/null
+++ b/datasets/pineapple/data.yaml
@@ -0,0 +1,14 @@
+path: datasets/pineapple # dataset root dir
+train: train/images
+val: valid/images
+test: test/images
+
+nc: 2
+names: ['pineapple', 'weed']
+
+roboflow:
+  workspace: hoku
+  project: pineapple-thsih
+  version: 7
+  license: MIT
+  url: https://universe.roboflow.com/hoku/pineapple-thsih/dataset/7
diff --git a/datasets/sugarcane/README.dataset.txt b/datasets/sugarcane/README.dataset.txt
new file mode 100644
index 00000000..beaa3ab7
--- /dev/null
+++ b/datasets/sugarcane/README.dataset.txt
@@ -0,0 +1,6 @@
+# sugarcane > 2023-07-19 9:58am
+https://universe.roboflow.com/hoku/sugarcane-3vhxz
+
+Provided by a Roboflow user
+License: CC BY 4.0
+
diff --git a/datasets/sugarcane/README.roboflow.txt b/datasets/sugarcane/README.roboflow.txt
new file mode 100644
index 00000000..2a8c9dc2
--- /dev/null
+++ b/datasets/sugarcane/README.roboflow.txt
@@ -0,0 +1,27 @@
+
+sugarcane - v11 2023-07-19 9:58am
+==============================
+
+This dataset was exported via roboflow.com on July 9, 2024 at 5:16 AM GMT
+
+Roboflow is an end-to-end computer vision platform that helps you
+* collaborate with your team on computer vision projects
+* collect & organize images
+* understand and search unstructured image data
+* annotate, and create datasets
+* export, train, and deploy computer vision models
+* use active learning to improve your dataset over time
+
+For state of the art Computer Vision training notebooks you can use with this dataset,
+visit https://github.com/roboflow/notebooks
+
+To find over 100k other datasets and pre-trained models, visit https://universe.roboflow.com
+
+The dataset includes 811 images.
+Sugarcane are annotated in YOLOv8 format.
+
+The following pre-processing was applied to each image:
+
+No image augmentation techniques were applied.
+
+
diff --git a/datasets/sugarcane/data.yaml b/datasets/sugarcane/data.yaml
new file mode 100644
index 00000000..a82f2cb1
--- /dev/null
+++ b/datasets/sugarcane/data.yaml
@@ -0,0 +1,14 @@
+path: datasets/sugarcane # dataset root dir
+train: train/images
+val: valid/images
+test: test/images
+
+nc: 2
+names: ['sugarcane', 'weed']
+
+roboflow:
+  workspace: hoku
+  project: sugarcane-3vhxz
+  version: 11
+  license: CC BY 4.0
+  url: https://universe.roboflow.com/hoku/sugarcane-3vhxz/dataset/11
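
As a rough sketch of how the fields above fit together (PyYAML is already pinned in the requirements; the printed values in the comments are illustrative):

```python
import yaml

# Load the dataset definition that YOLO consumes during training/validation.
with open('datasets/sugarcane/data.yaml') as f:
    data = yaml.safe_load(f)

print(data['nc'], data['names'])          # 2 ['sugarcane', 'weed']
# train/val/test are resolved relative to the dataset root `path`:
print(f"{data['path']}/{data['train']}")  # datasets/sugarcane/train/images
```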

From 4883af9a60af83a4390d0a7a96716fa0bc0fb268 Mon Sep 17 00:00:00 2001
From: KorRyu3 <146335193+KorRyu3@users.noreply.github.com>
Date: Thu, 1 Aug 2024 01:25:11 +0900
Subject: [PATCH 09/15] docs: Describe how to save models
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 runs/detect/README.md | 51 +++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 51 insertions(+)
 create mode 100644 runs/detect/README.md

diff --git a/runs/detect/README.md b/runs/detect/README.md
new file mode 100644
index 00000000..31ad4a1a
--- /dev/null
+++ b/runs/detect/README.md
@@ -0,0 +1,51 @@
+This README is under construction
+
+# How to save results after training
+
+## Directory structure of training runs
+
+Training results are saved to `runs/detect/<name(number)>`, where `<name(number)>` is the value of the `name` option passed on the command line at training time.<br>
+If the same `name` value is used again, the number in `<name(number)>` is incremented.
+
+The directory contains visualizations of the training results, model weight files, log files, and so on.
+
+## After training
+
+If training produces a good score, create a README.md in `runs/detect/<name(number)>/`.
+
+Use the following format for that README.md.
+
+Write the training command under "Command", and the path to a screenshot of the post-training console (console.png) under "Results".
+
+````markdown
+## Command
+
+```bash
+# Write the training command here
+
+# Example:
+yolo detect train \
+    cfg='cfg/sugarcane.yaml' \
+    data=datasets/sugarcane/data.yaml \
+    model=weights/yolov10x.pt \
+    name='yolov10x-sugarcane' \
+    epochs=300 \
+    batch=16 \
+    imgsz=640 \
+    device=0
+```
+
+## Training progress
+
+![results.png](./results.png)
+
+## Results
+
+![Save a screenshot of the results as `console.png` in the same directory](./console.png)
+````
+
+For an example README, see <<to be uploaded someday; until then, please refer to the [YOLOv9 implementation](https://github.com/TechC-SugarCane/train-YOLOv9/tree/main/runs/train/yolov9-e-pineapple-たたき台)>>.
+
+## Saving the model
+
+If you build a model that scores better than the one currently on GitHub, upload its `best.pt` to <<probably Hugging Face>>.
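
Since the upload destination above is still undecided, here is only a hypothetical sketch using `huggingface_hub` (already in the requirements); the run directory and repo id are placeholders, not decided values:

```python
from huggingface_hub import HfApi

api = HfApi()  # assumes prior login, e.g. via `huggingface-cli login`
api.upload_file(
    path_or_fileobj='runs/detect/yolov10x-sugarcane/weights/best.pt',  # hypothetical run dir
    path_in_repo='best.pt',
    repo_id='<user-or-org>/<model-repo>',  # placeholder
)
```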

From bfef71d80be9a0d55679157d9f7fe72449a376e1 Mon Sep 17 00:00:00 2001
From: KorRyu3 <146335193+KorRyu3@users.noreply.github.com>
Date: Thu, 1 Aug 2024 01:25:53 +0900
Subject: [PATCH 10/15] chore: Exclude the contents of the weights directory
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 weights/.gitignore | 2 ++
 1 file changed, 2 insertions(+)
 create mode 100644 weights/.gitignore

diff --git a/weights/.gitignore b/weights/.gitignore
new file mode 100644
index 00000000..d6b7ef32
--- /dev/null
+++ b/weights/.gitignore
@@ -0,0 +1,2 @@
+*
+!.gitignore

From bb4327529d518a249903602caa9117094f46aa49 Mon Sep 17 00:00:00 2001
From: KorRyu3 <146335193+KorRyu3@users.noreply.github.com>
Date: Thu, 1 Aug 2024 01:26:26 +0900
Subject: [PATCH 11/15] docs: update README.md

---
 README.md | 208 ++++++++++++++++++++++++------------------------------
 1 file changed, 92 insertions(+), 116 deletions(-)

diff --git a/README.md b/README.md
index fbb74d4c..458d101d 100644
--- a/README.md
+++ b/README.md
@@ -1,7 +1,7 @@
-# [YOLOv10: Real-Time End-to-End Object Detection](https://arxiv.org/abs/2405.14458)
+# Fine-tuning YOLOv10
 
 
-Official PyTorch implementation of **YOLOv10**.
+This repository is forked from the official one to fine-tune on our own datasets.
 
 <p align="center">
   <img src="figures/latency.svg" width=48%>
@@ -10,35 +10,7 @@ Official PyTorch implementation of **YOLOv10**.
 </p>
 
 [YOLOv10: Real-Time End-to-End Object Detection](https://arxiv.org/abs/2405.14458).\
-Ao Wang, Hui Chen, Lihao Liu, Kai Chen, Zijia Lin, Jungong Han, and Guiguang Ding\
-[![arXiv](https://img.shields.io/badge/arXiv-2405.14458-b31b1b.svg)](https://arxiv.org/abs/2405.14458) <a href="https://colab.research.google.com/github/roboflow-ai/notebooks/blob/main/notebooks/train-yolov10-object-detection-on-custom-dataset.ipynb#scrollTo=SaKTSzSWnG7s"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a> [![Hugging Face Spaces](https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Models-blue)](https://huggingface.co/collections/jameslahm/yolov10-665b0d90b0b5bb85129460c2) [![Hugging Face Spaces](https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue)](https://huggingface.co/spaces/jameslahm/YOLOv10)  [![Hugging Face Spaces](https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue)](https://huggingface.co/spaces/kadirnar/Yolov10)  [![Transformers.js Demo](https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Transformers.js-blue)](https://huggingface.co/spaces/Xenova/yolov10-web) [![LearnOpenCV](https://img.shields.io/badge/BlogPost-blue?logo=data%3Aimage%2Fpng%3Bbase64%2CiVBORw0KGgoAAAANSUhEUgAAAAoAAAAKCAMAAAC67D%2BPAAAALVBMVEX%2F%2F%2F%2F%2F%2F%2F%2F%2F%2F%2F%2F%2F%2F%2F%2F%2F%2F%2F%2F%2F%2F%2F%2F%2F%2F%2F%2F%2F%2F%2F%2F6%2Bfn6%2Bvq3y%2BJ8rOFSne9Jm%2FQcOlr5DJ7GAAAAB3RSTlMAB2LM94H1yMxlvwAAADNJREFUCFtjZGAEAob%2FQMDIyAJl%2FmFkYmEGM%2F%2F%2BYWRmYWYCMv8BmSxYmUgKkLQhGYawAgApySgfFDPqowAAAABJRU5ErkJggg%3D%3D&logoColor=black&labelColor=gray)](https://learnopencv.com/yolov10/) [![Openbayes Demo](https://img.shields.io/static/v1?label=Demo&message=OpenBayes%E8%B4%9D%E5%BC%8F%E8%AE%A1%E7%AE%97&color=green)](https://openbayes.com/console/public/tutorials/im29uYrnIoz) 
-
-
-<details>
-  <summary>
-  <font size="+1">Abstract</font>
-  </summary>
-Over the past years, YOLOs have emerged as the predominant paradigm in the field of real-time object detection owing to their effective balance between computational cost and detection performance. Researchers have explored the architectural designs, optimization objectives, data augmentation strategies, and others for YOLOs, achieving notable progress. However, the reliance on the non-maximum suppression (NMS) for post-processing hampers the end-to-end deployment of YOLOs and adversely impacts the inference latency. Besides, the design of various components in YOLOs lacks the comprehensive and thorough inspection, resulting in noticeable computational redundancy and limiting the model's capability. It renders the suboptimal efficiency, along with considerable potential for performance improvements. In this work, we aim to further advance the performance-efficiency boundary of YOLOs from both the post-processing and the model architecture. To this end, we first present the consistent dual assignments for NMS-free training of YOLOs, which brings the competitive performance and low inference latency simultaneously. Moreover, we introduce the holistic efficiency-accuracy driven model design strategy for YOLOs. We comprehensively optimize various components of YOLOs from both the efficiency and accuracy perspectives, which greatly reduces the computational overhead and enhances the capability. The outcome of our effort is a new generation of YOLO series for real-time end-to-end object detection, dubbed YOLOv10. Extensive experiments show that YOLOv10 achieves the state-of-the-art performance and efficiency across various model scales. For example, our YOLOv10-S is 1.8$\times$ faster than RT-DETR-R18 under the similar AP on COCO, meanwhile enjoying 2.8$\times$ smaller number of parameters and FLOPs. Compared with YOLOv9-C, YOLOv10-B has 46\% less latency and 25\% fewer parameters for the same performance.
-</details>
-
-## Notes
-- 2024/05/31: Please use the [exported format](https://github.com/THU-MIG/yolov10?tab=readme-ov-file#export) for benchmark. In the non-exported format, e.g., pytorch, the speed of YOLOv10 is biased because the unnecessary `cv2` and `cv3` operations in the `v10Detect` are executed during inference.
-- 2024/05/30: We provide [some clarifications and suggestions](https://github.com/THU-MIG/yolov10/issues/136) for detecting smaller objects or objects in the distance with YOLOv10. Thanks to [SkalskiP](https://github.com/SkalskiP)!
-- 2024/05/27: We have updated the [checkpoints](https://huggingface.co/collections/jameslahm/yolov10-665b0d90b0b5bb85129460c2) with class names, for ease of use.
-
-## UPDATES 🔥
-- 2024/06/01: Thanks to [ErlanggaYudiPradana](https://github.com/rlggyp) for the integration with [C++ | OpenVINO | OpenCV](https://github.com/rlggyp/YOLOv10-OpenVINO-CPP-Inference)
-- 2024/06/01: Thanks to [NielsRogge](https://github.com/NielsRogge) and [AK](https://x.com/_akhaliq) for hosting the models on the HuggingFace Hub!
-- 2024/05/31: Build [yolov10-jetson](https://github.com/Seeed-Projects/jetson-examples/blob/main/reComputer/scripts/yolov10/README.md) docker image by [youjiang](https://github.com/yuyoujiang)!
-- 2024/05/31: Thanks to [mohamedsamirx](https://github.com/mohamedsamirx) for the integration with [BoTSORT, DeepOCSORT, OCSORT, HybridSORT, ByteTrack, StrongSORT using BoxMOT library](https://colab.research.google.com/drive/1-QV2TNfqaMsh14w5VxieEyanugVBG14V?usp=sharing)!
-- 2024/05/31: Thanks to [kaylorchen](https://github.com/kaylorchen) for the integration with [rk3588](https://github.com/kaylorchen/rk3588-yolo-demo)!
-- 2024/05/30: Thanks to [eaidova](https://github.com/eaidova) for the integration with [OpenVINO™](https://github.com/openvinotoolkit/openvino_notebooks/blob/0ba3c0211bcd49aa860369feddffdf7273a73c64/notebooks/yolov10-optimization/yolov10-optimization.ipynb)!
-- 2024/05/29: Add the gradio demo for running the models locally. Thanks to [AK](https://x.com/_akhaliq)!
-- 2024/05/27: Thanks to [sujanshresstha](sujanshresstha) for the integration with [DeepSORT](https://github.com/sujanshresstha/YOLOv10_DeepSORT.git)!
-- 2024/05/26: Thanks to [CVHub520](https://github.com/CVHub520) for the integration into [X-AnyLabeling](https://github.com/CVHub520/X-AnyLabeling)!
-- 2024/05/26: Thanks to [DanielSarmiento04](https://github.com/DanielSarmiento04) for integrate in [c++ | ONNX | OPENCV](https://github.com/DanielSarmiento04/yolov10cpp)!
-- 2024/05/25: Add [Transformers.js demo](https://huggingface.co/spaces/Xenova/yolov10-web) and onnx weights(yolov10[n](https://huggingface.co/onnx-community/yolov10n)/[s](https://huggingface.co/onnx-community/yolov10s)/[m](https://huggingface.co/onnx-community/yolov10m)/[b](https://huggingface.co/onnx-community/yolov10b)/[l](https://huggingface.co/onnx-community/yolov10l)/[x](https://huggingface.co/onnx-community/yolov10x)). Thanks to [xenova](https://github.com/xenova)!
-- 2024/05/25: Add [colab demo](https://colab.research.google.com/github/roboflow-ai/notebooks/blob/main/notebooks/train-yolov10-object-detection-on-custom-dataset.ipynb#scrollTo=SaKTSzSWnG7s), [HuggingFace Demo](https://huggingface.co/spaces/kadirnar/Yolov10), and [HuggingFace Model Page](https://huggingface.co/kadirnar/Yolov10). Thanks to [SkalskiP](https://github.com/SkalskiP) and [kadirnar](https://github.com/kadirnar)! 
+Ao Wang, Hui Chen, Lihao Liu, Kai Chen, Zijia Lin, Jungong Han, and Guiguang Ding
 
 ## Performance
 COCO
@@ -53,60 +25,111 @@ COCO
 | [YOLOv10-X](https://huggingface.co/jameslahm/yolov10x) |   640  |     29.5M    |   160.4G   |     54.4%     | 10.70ms |
 
 ## Installation
-`conda` virtual environment is recommended. 
+
+## Environment
+
+- pyenv
+- Python 3.9.13 (matching the official version)
+- CUDA 11.8
+
+## Setup
+
+### 1. Clone the repository
+
+```bash
+git clone git@github.com:TechC-SugarCane/train-YOLOv10.git
+
+cd train-YOLOv10
 ```
-conda create -n yolov10 python=3.9
-conda activate yolov10
-pip install -r requirements.txt
+
+### 2. Set up Python
+
+```bash
+pyenv install
+```
+
+### 3. Create a virtual environment
+
+```bash
+python -m venv .venv
+```
+
+### 4. Activate the virtual environment
+
+```bash
+# mac
+source .venv/bin/activate
+
+# windows
+.venv\Scripts\activate
+```
+
+Note: To leave the virtual environment, run the `deactivate` command.
+
+### 5. Install the dependencies
+
+```bash
+# If running inference on the CPU
+pip install -r requirements-cpu.txt
+
+# If running inference on the GPU
+pip install -r requirements-gpu.txt
+
+# Common
 pip install -e .
 ```
-## Demo
-```
-python app.py
-# Please visit http://127.0.0.1:7860
+
+### 6. Change the default settings
+
+```bash
+# Point the datasets directory at the current directory
+# (the default is ../datasets)
+yolo settings datasets_dir=.
 ```
 
-## Validation
-[`yolov10n`](https://huggingface.co/jameslahm/yolov10n)  [`yolov10s`](https://huggingface.co/jameslahm/yolov10s)  [`yolov10m`](https://huggingface.co/jameslahm/yolov10m)  [`yolov10b`](https://huggingface.co/jameslahm/yolov10b)  [`yolov10l`](https://huggingface.co/jameslahm/yolov10l)  [`yolov10x`](https://huggingface.co/jameslahm/yolov10x)  
-```
-yolo val model=jameslahm/yolov10{n/s/m/b/l/x} data=coco.yaml batch=256
+## Training
+
+Since `yolov10x.pt` is used as the pretrained model, download it from the [official GitHub release](https://github.com/THU-MIG/yolov10/releases/download/v1.1/yolov10x.pt) and place it in the `weights` directory.
+
+Also, place the datasets used for training in the `datasets` directory, following [`datasets/README.md`](./datasets/README.md).
+
+Training results are saved to `runs/detect/<name(number)>`.
+
+If training produces a good score, create a README.md in `runs/detect/<name(number)>/`.
+When doing so, use [`runs/detect/README.md`](./runs/detect/README.md) as a reference.
+
+```bash
+# sugarcane
+yolo detect train cfg='cfg/sugarcane.yaml' data=datasets/sugarcane/data.yaml model=weights/yolov10x.pt name='yolov10x-sugarcane' epochs=300 batch=16 imgsz=640 device=0
+
+# pineapple
+yolo detect train cfg='cfg/pineapple.yaml' data=datasets/pineapple/data.yaml model=weights/yolov10x.pt name='yolov10x-pineapple' epochs=300 batch=16 imgsz=640 device=0
 ```
 
-Or
-```python
-from ultralytics import YOLOv10
+Note: Running the above also downloads `yolov8n.pt`; it is apparently only used for the AMP (Automatic Mixed Precision) check, so you can safely ignore it.
+See [#106](https://github.com/THU-MIG/yolov10/issues/106) for details.
 
-model = YOLOv10.from_pretrained('jameslahm/yolov10{n/s/m/b/l/x}')
-# or
-# wget https://github.com/THU-MIG/yolov10/releases/download/v1.1/yolov10{n/s/m/b/l/x}.pt
-model = YOLOv10('yolov10{n/s/m/b/l/x}.pt')
+Feel free to tune the hyperparameters. The config files live in `cfg/`; the `Hyperparameters` section of each file is where the hyperparameter-related settings go.
 
-model.val(data='coco.yaml', batch=256)
-```
+- Sugarcane: `sugarcane.yaml`
+- Pineapple: `pineapple.yaml`
 
+## Contributor guidelines
 
-## Training 
-```
-yolo detect train data=coco.yaml model=yolov10n/s/m/b/l/x.yaml epochs=500 batch=256 imgsz=640 device=0,1,2,3,4,5,6,7
-```
+For contributor guidelines, see [CONTRIBUTING.md](https://github.com/TechC-SugarCane/.github/blob/main/CONTRIBUTING.md).
 
-Or
-```python
-from ultralytics import YOLOv10
+### Note
 
-model = YOLOv10()
-# If you want to finetune the model with pretrained weights, you could load the 
-# pretrained weights like below
-# model = YOLOv10.from_pretrained('jameslahm/yolov10{n/s/m/b/l/x}')
-# or
-# wget https://github.com/THU-MIG/yolov10/releases/download/v1.1/yolov10{n/s/m/b/l/x}.pt
-# model = YOLOv10('yolov10{n/s/m/b/l/x}.pt')
+Since this repository is a fork, make sure to open Pull Requests against this repository.
 
-model.train(data='coco.yaml', epochs=500, batch=256, imgsz=640)
-```
+Be careful: by default, the base repository is the official repository.
+
+On the `Comparing changes` screen, switch the `base repository` dropdown to `TechC-SugarCane/train-YOLOv10`. Once the page reloads, you are all set.
 
 ## Push to hub to 🤗
 
+To be used later
+
 Optionally, you can push your fine-tuned model to the [Hugging Face hub](https://huggingface.co/) as a public or private model:
 
 ```python
@@ -117,25 +140,8 @@ model.push_to_hub("<your-hf-username-or-organization/yolov10-finetuned-crop-dete
 model.push_to_hub("<your-hf-username-or-organization/yolov10-finetuned-crop-detection", private=True)
 ```
 
-## Prediction
-Note that a smaller confidence threshold can be set to detect smaller objects or objects in the distance. Please refer to [here](https://github.com/THU-MIG/yolov10/issues/136) for details.
-```
-yolo predict model=jameslahm/yolov10{n/s/m/b/l/x}
-```
-
-Or
-```python
-from ultralytics import YOLOv10
-
-model = YOLOv10.from_pretrained('jameslahm/yolov10{n/s/m/b/l/x}')
-# or
-# wget https://github.com/THU-MIG/yolov10/releases/download/v1.1/yolov10{n/s/m/b/l/x}.pt
-model = YOLOv10('yolov10{n/s/m/b/l/x}.pt')
-
-model.predict()
-```
-
 ## Export
+To be used later
 ```
 # End-to-End ONNX
 yolo export model=jameslahm/yolov10{n/s/m/b/l/x} format=onnx opset=13 simplify
@@ -149,33 +155,3 @@ trtexec --onnx=yolov10n/s/m/b/l/x.onnx --saveEngine=yolov10n/s/m/b/l/x.engine --
 # Predict with TensorRT
 yolo predict model=yolov10n/s/m/b/l/x.engine
 ```
-
-Or
-```python
-from ultralytics import YOLOv10
-
-model = YOLOv10.from_pretrained('jameslahm/yolov10{n/s/m/b/l/x}')
-# or
-# wget https://github.com/THU-MIG/yolov10/releases/download/v1.1/yolov10{n/s/m/b/l/x}.pt
-model = YOLOv10('yolov10{n/s/m/b/l/x}.pt')
-
-model.export(...)
-```
-
-## Acknowledgement
-
-The code base is built with [ultralytics](https://github.com/ultralytics/ultralytics) and [RT-DETR](https://github.com/lyuwenyu/RT-DETR).
-
-Thanks for the great implementations! 
-
-## Citation
-
-If our code or models help your work, please cite our paper:
-```BibTeX
-@article{wang2024yolov10,
-  title={YOLOv10: Real-Time End-to-End Object Detection},
-  author={Wang, Ao and Chen, Hui and Liu, Lihao and Chen, Kai and Lin, Zijia and Han, Jungong and Ding, Guiguang},
-  journal={arXiv preprint arXiv:2405.14458},
-  year={2024}
-}
-```

From 0df6e4b2565f1297d5463f54c2226b57309a07d2 Mon Sep 17 00:00:00 2001
From: KorRyu3 <146335193+KorRyu3@users.noreply.github.com>
Date: Thu, 1 Aug 2024 21:49:16 +0900
Subject: [PATCH 12/15] fix: Align cfg files with default.yaml
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 cfg/pineapple.yaml | 2 +-
 cfg/sugarcane.yaml | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/cfg/pineapple.yaml b/cfg/pineapple.yaml
index bc64897e..bd074b10 100644
--- a/cfg/pineapple.yaml
+++ b/cfg/pineapple.yaml
@@ -82,7 +82,7 @@ keras: False # (bool) use Keras
 optimize: False # (bool) TorchScript: optimize for mobile
 int8: False # (bool) CoreML/TF INT8 quantization
 dynamic: False # (bool) ONNX/TF/TensorRT: dynamic axes
-simplify: False # (bool) ONNX: simplify model
+simplify: False # (bool) ONNX: simplify model using `onnxslim`
 opset: # (int, optional) ONNX: opset version
 workspace: 4 # (int) TensorRT: workspace size (GB)
 nms: False # (bool) CoreML: add NMS
diff --git a/cfg/sugarcane.yaml b/cfg/sugarcane.yaml
index bc64897e..bd074b10 100644
--- a/cfg/sugarcane.yaml
+++ b/cfg/sugarcane.yaml
@@ -82,7 +82,7 @@ keras: False # (bool) use Keras
 optimize: False # (bool) TorchScript: optimize for mobile
 int8: False # (bool) CoreML/TF INT8 quantization
 dynamic: False # (bool) ONNX/TF/TensorRT: dynamic axes
-simplify: False # (bool) ONNX: simplify model
+simplify: False # (bool) ONNX: simplify model using `onnxslim`
 opset: # (int, optional) ONNX: opset version
 workspace: 4 # (int) TensorRT: workspace size (GB)
 nms: False # (bool) CoreML: add NMS

From 6c20b529c2c99190a3b82ff24ed0d51fa327f239 Mon Sep 17 00:00:00 2001
From: KorRyu3 <146335193+KorRyu3@users.noreply.github.com>
Date: Sun, 4 Aug 2024 15:08:23 +0900
Subject: [PATCH 13/15] fix: Resolve an error when installing matplotlib
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 requirements-gpu.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/requirements-gpu.txt b/requirements-gpu.txt
index c22dda71..0fb576d7 100644
--- a/requirements-gpu.txt
+++ b/requirements-gpu.txt
@@ -36,7 +36,7 @@ jsonschema-specifications==2023.12.1
 kiwisolver==1.4.5
 markdown-it-py==3.0.0
 MarkupSafe==2.1.5
-matplotlib==3.9.1
+matplotlib==3.9.0
 mdurl==0.1.2
 mpmath==1.3.0
 networkx==3.2.1

From e9586cd9b8c03574e35ca3d317e4dd5d371b305b Mon Sep 17 00:00:00 2001
From: KorRyu3 <146335193+KorRyu3@users.noreply.github.com>
Date: Sun, 4 Aug 2024 16:59:28 +0900
Subject: [PATCH 14/15] feat: Change where the datasets live and how to download them
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 .gitignore                             |  3 ++
 README.md                              | 22 ++++++++--
 datasets/.gitignore                    |  8 ----
 datasets/README.md                     | 56 --------------------------
 datasets/pineapple/README.dataset.txt  |  6 ---
 datasets/pineapple/README.roboflow.txt | 27 -------------
 datasets/pineapple/data.yaml           | 14 -------
 datasets/sugarcane/README.dataset.txt  |  6 ---
 datasets/sugarcane/README.roboflow.txt | 27 -------------
 datasets/sugarcane/data.yaml           | 14 -------
 10 files changed, 22 insertions(+), 161 deletions(-)
 delete mode 100644 datasets/.gitignore
 delete mode 100644 datasets/README.md
 delete mode 100644 datasets/pineapple/README.dataset.txt
 delete mode 100644 datasets/pineapple/README.roboflow.txt
 delete mode 100644 datasets/pineapple/data.yaml
 delete mode 100644 datasets/sugarcane/README.dataset.txt
 delete mode 100644 datasets/sugarcane/README.roboflow.txt
 delete mode 100644 datasets/sugarcane/data.yaml

diff --git a/.gitignore b/.gitignore
index f99e12a8..7c148445 100644
--- a/.gitignore
+++ b/.gitignore
@@ -159,3 +159,6 @@ pnnx*
 
 # Autogenerated files for tests
 /ultralytics/assets/
+
+# datasets
+yolov10-datasets/
diff --git a/README.md b/README.md
index 458d101d..12e878de 100644
--- a/README.md
+++ b/README.md
@@ -91,7 +91,23 @@ yolo settings datasets_dir=.
 
 Since `yolov10x.pt` is used as the pre-trained model, download it from the [official GitHub releases](https://github.com/THU-MIG/yolov10/releases/download/v1.1/yolov10x.pt) and place it in the `weights` directory.
 
-Also, place the datasets used for training in the `datasets` directory, following [`datasets/README.md`](./datasets/README.md).
+The datasets used for training are built with a service called Roboflow.
+
+The datasets used for training and evaluation are available at:
+
+- [Sugarcane](https://universe.roboflow.com/hoku/sugarcane-3vhxz/dataset/11)
+- [Pineapple](https://universe.roboflow.com/hoku/pineapple-thsih/dataset/7)
+
+However, downloading them manually is tedious, so they are bundled together on `huggingface`.
+
+Run the commands below to download the datasets.
+
+```bash
+# Make sure you have git-lfs installed (https://git-lfs.com)
+git lfs install
+
+git clone https://huggingface.co/datasets/TechC-SugarCane/yolov10-datasets
+```
 
 Training results are saved to `runs/detect/<name(number)>`.
 
@@ -100,10 +116,10 @@ yolo settings datasets_dir=.
 
 ```bash
 # sugarcane
-yolo detect train cfg='cfg/sugarcane.yaml' data=datasets/sugarcane/data.yaml model=weights/yolov10x.pt name='yolov10x-sugarcane' epochs=300 batch=16 imgsz=640 device=0
+yolo detect train cfg='cfg/sugarcane.yaml' data=yolov10-datasets/sugarcane/data.yaml model=weights/yolov10x.pt name='yolov10x-sugarcane' epochs=300 batch=16 imgsz=640 device=0
 
 # pineapple
-yolo detect train cfg='cfg/pineapple.yaml' data=datasets/pineapple/data.yaml model=weights/yolov10x.pt name='yolov10x-pineapple' epochs=300 batch=16 imgsz=640 device=0
+yolo detect train cfg='cfg/pineapple.yaml' data=yolov10-datasets/pineapple/data.yaml model=weights/yolov10x.pt name='yolov10x-pineapple' epochs=300 batch=16 imgsz=640 device=0
 ```
 
 Note: running the above also downloads `yolov8n.pt`, but it is apparently only used for the AMP (Automatic Mixed Precision) check, so you can safely ignore it.
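To sanity-check the new download flow, the paths the training commands reference can be listed after cloning; this is an illustrative check, not part of the patch:

```bash
# Illustrative check (not part of the patch): the training commands above point
# at these data.yaml files inside the cloned yolov10-datasets repository.
ls yolov10-datasets/sugarcane/data.yaml yolov10-datasets/pineapple/data.yaml
```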
diff --git a/datasets/.gitignore b/datasets/.gitignore
deleted file mode 100644
index 116719a0..00000000
--- a/datasets/.gitignore
+++ /dev/null
@@ -1,8 +0,0 @@
-# Exclude all files
-*/*
-
-# Exceptions
-!.gitignore
-!README.md
-!**/data.yaml
-!**/README*.txt
diff --git a/datasets/README.md b/datasets/README.md
deleted file mode 100644
index ad9705f2..00000000
--- a/datasets/README.md
+++ /dev/null
@@ -1,56 +0,0 @@
-# Custom datasets
-
-The datasets are built with a service called Roboflow.
-
-Download the datasets used for training and evaluation from
-
-- [Sugarcane](https://universe.roboflow.com/hoku/sugarcane-3vhxz/dataset/11)
-- [Pineapple](https://universe.roboflow.com/hoku/pineapple-thsih/dataset/7)
-
-in `YOLO v8` format, then place the `train`, `valid`, and `test` splits in each directory as shown below.
-
-## Directory structure
-
-```plaintext
-datasets/
-    .gitignore
-    README.md
-    sugarcane/
-        data.yaml
-        README.dataset.txt
-        README.roboflow.txt
-        train/
-            images/
-                ...
-            labels/
-                ...
-        valid/
-            images/
-                ...
-            labels/
-                ...
-        test/
-            images/
-                ...
-            labels/
-                ...
-    pineapple/
-        data.yaml
-        README.dataset.txt
-        README.roboflow.txt
-        train/
-            images/
-                ...
-            labels/
-                ...
-        valid/
-            images/
-                ...
-            labels/
-                ...
-        test/
-            images/
-                ...
-            labels/
-                ...
-```
diff --git a/datasets/pineapple/README.dataset.txt b/datasets/pineapple/README.dataset.txt
deleted file mode 100644
index 39eccf2d..00000000
--- a/datasets/pineapple/README.dataset.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-# Pineapple > 2023-07-19 9:57am
-https://universe.roboflow.com/hoku/pineapple-thsih
-
-Provided by a Roboflow user
-License: MIT
-
diff --git a/datasets/pineapple/README.roboflow.txt b/datasets/pineapple/README.roboflow.txt
deleted file mode 100644
index 1e88fe1d..00000000
--- a/datasets/pineapple/README.roboflow.txt
+++ /dev/null
@@ -1,27 +0,0 @@
-
-Pineapple - v7 2023-07-19 9:57am
-==============================
-
-This dataset was exported via roboflow.com on July 31, 2024 at 3:58 PM GMT
-
-Roboflow is an end-to-end computer vision platform that helps you
-* collaborate with your team on computer vision projects
-* collect & organize images
-* understand and search unstructured image data
-* annotate, and create datasets
-* export, train, and deploy computer vision models
-* use active learning to improve your dataset over time
-
-For state of the art Computer Vision training notebooks you can use with this dataset,
-visit https://github.com/roboflow/notebooks
-
-To find over 100k other datasets and pre-trained models, visit https://universe.roboflow.com
-
-The dataset includes 619 images.
-Pineapple are annotated in YOLOv8 format.
-
-The following pre-processing was applied to each image:
-
-No image augmentation techniques were applied.
-
-
diff --git a/datasets/pineapple/data.yaml b/datasets/pineapple/data.yaml
deleted file mode 100644
index 9860cb24..00000000
--- a/datasets/pineapple/data.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
-path: datasets/sugarcane # dataset root dir
-train: train/images
-val: valid/images
-test: test/images
-
-nc: 2
-names: ['pineapple', 'weed']
-
-roboflow:
-  workspace: hoku
-  project: pineapple-thsih
-  version: 7
-  license: MIT
-  url: https://universe.roboflow.com/hoku/pineapple-thsih/dataset/7
diff --git a/datasets/sugarcane/README.dataset.txt b/datasets/sugarcane/README.dataset.txt
deleted file mode 100644
index beaa3ab7..00000000
--- a/datasets/sugarcane/README.dataset.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-# sugarcane > 2023-07-19 9:58am
-https://universe.roboflow.com/hoku/sugarcane-3vhxz
-
-Provided by a Roboflow user
-License: CC BY 4.0
-
diff --git a/datasets/sugarcane/README.roboflow.txt b/datasets/sugarcane/README.roboflow.txt
deleted file mode 100644
index 2a8c9dc2..00000000
--- a/datasets/sugarcane/README.roboflow.txt
+++ /dev/null
@@ -1,27 +0,0 @@
-
-sugarcane - v11 2023-07-19 9:58am
-==============================
-
-This dataset was exported via roboflow.com on July 9, 2024 at 5:16 AM GMT
-
-Roboflow is an end-to-end computer vision platform that helps you
-* collaborate with your team on computer vision projects
-* collect & organize images
-* understand and search unstructured image data
-* annotate, and create datasets
-* export, train, and deploy computer vision models
-* use active learning to improve your dataset over time
-
-For state of the art Computer Vision training notebooks you can use with this dataset,
-visit https://github.com/roboflow/notebooks
-
-To find over 100k other datasets and pre-trained models, visit https://universe.roboflow.com
-
-The dataset includes 811 images.
-Sugarcane are annotated in YOLOv8 format.
-
-The following pre-processing was applied to each image:
-
-No image augmentation techniques were applied.
-
-
diff --git a/datasets/sugarcane/data.yaml b/datasets/sugarcane/data.yaml
deleted file mode 100644
index a82f2cb1..00000000
--- a/datasets/sugarcane/data.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
-path: datasets/sugarcane # dataset root dir
-train: train/images
-val: valid/images
-test: test/images
-
-nc: 2
-names: ['sugarcane', 'weed']
-
-roboflow:
-  workspace: hoku
-  project: sugarcane-3vhxz
-  version: 11
-  license: CC BY 4.0
-  url: https://universe.roboflow.com/hoku/sugarcane-3vhxz/dataset/11
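The deleted `data.yaml` files above carry the split paths, class count (`nc`), and class names the trainer reads; the same files now live in the cloned `yolov10-datasets` repository. As a hedged sketch of exercising one after training, assuming the fork keeps the standard `yolo detect val` CLI, its `split` argument, and the default `best.pt` checkpoint path:

```bash
# Sketch under the assumptions above: evaluate the best checkpoint from the
# earlier run name against the test split declared in data.yaml.
yolo detect val data=yolov10-datasets/sugarcane/data.yaml \
    model=runs/detect/yolov10x-sugarcane/weights/best.pt split=test
```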

From d4500d81d718c3b38dadec97f54d38f5a6661ff9 Mon Sep 17 00:00:00 2001
From: KorRyu3 <146335193+KorRyu3@users.noreply.github.com>
Date: Sun, 4 Aug 2024 17:06:48 +0900
Subject: [PATCH 15/15] docs: update README.md

---
 README.md | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/README.md b/README.md
index 12e878de..a7cdacb8 100644
--- a/README.md
+++ b/README.md
@@ -107,6 +107,9 @@ yolo settings datasets_dir=.
 git lfs install
 
 git clone https://huggingface.co/datasets/TechC-SugarCane/yolov10-datasets
+
+# Disable the LFS lock-verification error that occurs on git push
+git config lfs.https://github.com/TechC-SugarCane/train-YOLOv10.git/info/lfs.locksverify false
 ```
 
 Training results are saved to `runs/detect/<name(number)>`.
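`git config --get` can confirm the per-URL setting added above was recorded; an illustrative check, not part of the patch:

```bash
# Prints "false" once LFS lock verification is disabled for this remote,
# which silences the error raised on git push.
git config --get lfs.https://github.com/TechC-SugarCane/train-YOLOv10.git/info/lfs.locksverify
```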