Add Dockerfile-conda FROM continuumio/miniconda3:latest (#4706)

Glenn Jocher 2023-09-03 23:44:11 +02:00 committed by GitHub
parent 02b857e14c
commit a1c1d6b483
10 changed files with 152 additions and 28 deletions

@@ -144,6 +144,7 @@ jobs:
         run: |
           coverage xml -o coverage-benchmarks.xml
       - name: Upload Coverage Reports to CodeCov
+        if: github.repository == 'ultralytics/ultralytics'
         uses: codecov/codecov-action@v3
         with:
           flags: Benchmarks
@@ -235,13 +236,73 @@ jobs:
         env:
           CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
 
+  Conda:
+    if: github.repository == 'ultralytics/ultralytics' && (github.event_name == 'workflow_dispatch' || github.event_name == 'schedule')
+    runs-on: ${{ matrix.os }}
+    strategy:
+      fail-fast: false
+      matrix:
+        os: [ubuntu-latest]
+        python-version: ['3.11']
+    defaults:
+      run:
+        shell: bash -el {0}
+    steps:
+      - uses: conda-incubator/setup-miniconda@v2
+        with:
+          python-version: ${{ matrix.python-version }}
+          mamba-version: "*"
+          channels: conda-forge,defaults
+          channel-priority: true
+          activate-environment: anaconda-client-env
+      - name: Install Libmamba
+        run: |
+          # conda install conda-libmamba-solver
+          conda config --set solver libmamba
+      - name: Install Ultralytics package from conda-forge
+        run: |
+          conda install -c conda-forge ultralytics
+      - name: Install pip packages
+        run: |
+          pip install pytest 'coremltools>=7.0.b1' # 'openvino-dev>=2023.0'
+      - name: Check environment
+        run: |
+          echo "RUNNER_OS is ${{ runner.os }}"
+          echo "GITHUB_EVENT_NAME is ${{ github.event_name }}"
+          echo "GITHUB_WORKFLOW is ${{ github.workflow }}"
+          echo "GITHUB_ACTOR is ${{ github.actor }}"
+          echo "GITHUB_REPOSITORY is ${{ github.repository }}"
+          echo "GITHUB_REPOSITORY_OWNER is ${{ github.repository_owner }}"
+          python --version
+          conda list
+      - name: Test CLI
+        run: |
+          yolo predict model=yolov8n.pt imgsz=320
+          yolo train model=yolov8n.pt data=coco8.yaml epochs=1 imgsz=32
+          yolo val model=yolov8n.pt data=coco8.yaml imgsz=32
+          yolo export model=yolov8n.pt format=torchscript imgsz=160
+      - name: Test Python
+        run: |
+          python -c "
+          from ultralytics import YOLO
+          model = YOLO('yolov8n.pt')
+          results = model.train(data='coco8.yaml', epochs=3, imgsz=160)
+          results = model.val(imgsz=160)
+          results = model.predict(imgsz=160)
+          results = model.export(format='onnx', imgsz=160)
+          "
+      - name: PyTest
+        run: |
+          git clone https://github.com/ultralytics/ultralytics
+          pytest ultralytics/tests/test_cli.py # full tests fail due to openvino export failure
+
   Summary:
     runs-on: ubuntu-latest
-    needs: [HUB, Benchmarks, Tests, GPU] # Add job names that you want to check for failure
+    needs: [HUB, Benchmarks, Tests, GPU, Conda] # Add job names that you want to check for failure
     if: always() # This ensures the job runs even if previous jobs fail
     steps:
       - name: Check for failure and notify
-        if: (needs.HUB.result == 'failure' || needs.Benchmarks.result == 'failure' || needs.Tests.result == 'failure' || needs.GPU.result == 'failure') && github.repository == 'ultralytics/ultralytics' && (github.event_name == 'schedule' || github.event_name == 'push')
+        if: (needs.HUB.result == 'failure' || needs.Benchmarks.result == 'failure' || needs.Tests.result == 'failure' || needs.GPU.result == 'failure' || needs.Conda.result == 'failure') && github.repository == 'ultralytics/ultralytics' && (github.event_name == 'schedule' || github.event_name == 'push')
         uses: slackapi/slack-github-action@v1.24.0
         with:
           payload: |

@@ -24,6 +24,9 @@ on:
       Dockerfile-python:
         type: boolean
         description: Use Dockerfile-python
+      Dockerfile-conda:
+        type: boolean
+        description: Use Dockerfile-conda
       push:
         type: boolean
         description: Push images to Docker Hub
@@ -36,9 +39,15 @@ jobs:
     runs-on: ubuntu-latest
     strategy:
       fail-fast: false
-      max-parallel: 5
+      max-parallel: 6
       matrix:
         include:
+          - dockerfile: "Dockerfile"
+            tags: "latest"
+            platforms: "linux/amd64"
+          - dockerfile: "Dockerfile-cpu"
+            tags: "latest-cpu"
+            platforms: "linux/amd64"
           - dockerfile: "Dockerfile-arm64"
             tags: "latest-arm64"
             platforms: "linux/arm64"
@@ -48,12 +57,9 @@ jobs:
           - dockerfile: "Dockerfile-python"
             tags: "latest-python"
             platforms: "linux/amd64"
-          - dockerfile: "Dockerfile-cpu"
-            tags: "latest-cpu"
-            platforms: "linux/amd64"
-          - dockerfile: "Dockerfile"
-            tags: "latest"
-            platforms: "linux/amd64"
+          # - dockerfile: "Dockerfile-conda"
+          #   tags: "latest-conda"
+          #   platforms: "linux/amd64"
     steps:
       - name: Checkout repo
         uses: actions/checkout@v3
@@ -109,16 +115,16 @@ jobs:
           -t ultralytics/ultralytics:${{ steps.get_version.outputs.version_tag }} .
       - name: Run Tests
-        if: (github.event_name == 'push' || github.event.inputs[matrix.dockerfile] == 'true') && matrix.platforms == 'linux/amd64' # arm64 images not supported on GitHub CI runners
+        if: (github.event_name == 'push' || github.event.inputs[matrix.dockerfile] == 'true') && matrix.platforms == 'linux/amd64' && matrix.dockerfile != 'Dockerfile-conda' # arm64 images not supported on GitHub CI runners
         run: docker run ultralytics/ultralytics:${{ matrix.tags }} /bin/bash -c "pip install pytest && pytest tests"
       - name: Run Benchmarks
         # WARNING: Dockerfile (GPU) error on TF.js export 'module 'numpy' has no attribute 'object'.
-        if: (github.event_name == 'push' || github.event.inputs[matrix.dockerfile] == 'true') && matrix.platforms == 'linux/amd64' && matrix.dockerfile != 'Dockerfile' # arm64 images not supported on GitHub CI runners
+        if: (github.event_name == 'push' || github.event.inputs[matrix.dockerfile] == 'true') && matrix.platforms == 'linux/amd64' && matrix.dockerfile != 'Dockerfile' && matrix.dockerfile != 'Dockerfile-conda' # arm64 images not supported on GitHub CI runners
         run: docker run ultralytics/ultralytics:${{ matrix.tags }} yolo benchmark model=yolov8n.pt imgsz=160 verbose=0.26
       - name: Push Docker Image with Ultralytics version tag
-        if: (github.event_name == 'push' || (github.event.inputs[matrix.dockerfile] == 'true' && github.event.inputs.push == 'true')) && steps.check_tag.outputs.exists == 'false'
+        if: (github.event_name == 'push' || (github.event.inputs[matrix.dockerfile] == 'true' && github.event.inputs.push == 'true')) && steps.check_tag.outputs.exists == 'false' && matrix.dockerfile != 'Dockerfile-conda'
         run: |
           docker push ultralytics/ultralytics:${{ steps.get_version.outputs.version_tag }}

docker/Dockerfile-conda (new file)

@@ -0,0 +1,37 @@
+# Ultralytics YOLO 🚀, AGPL-3.0 license
+# Builds ultralytics/ultralytics:latest-conda image on DockerHub https://hub.docker.com/r/ultralytics/ultralytics
+# Image is optimized for Ultralytics Anaconda (https://anaconda.org/conda-forge/ultralytics) installation and usage
+
+# Start FROM miniconda3 image https://hub.docker.com/r/continuumio/miniconda3
+FROM continuumio/miniconda3:latest
+
+# Downloads to user config dir
+ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/
+
+# Install linux packages
+RUN apt update \
+    && apt install --no-install-recommends -y libgl1-mesa-glx
+
+# Copy contents
+ADD https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8n.pt .
+
+# Install conda packages
+# mkl required to fix 'OSError: libmkl_intel_lp64.so.2: cannot open shared object file: No such file or directory'
+RUN conda config --set solver libmamba && \
+    conda install -c conda-forge ultralytics mkl
+    # conda install -c pytorch -c nvidia -c conda-forge pytorch torchvision pytorch-cuda=11.8 ultralytics
+
+
+# Usage Examples -------------------------------------------------------------------------------------------------------
+
+# Build and Push
+# t=ultralytics/ultralytics:latest-conda && sudo docker build -f docker/Dockerfile-conda -t $t . && sudo docker push $t
+
+# Run
+# t=ultralytics/ultralytics:latest-conda && sudo docker run -it --ipc=host $t
+
+# Pull and Run
+# t=ultralytics/ultralytics:latest-conda && sudo docker pull $t && sudo docker run -it --ipc=host $t
+
+# Pull and Run with local volume mounted
+# t=ultralytics/ultralytics:latest-conda && sudo docker pull $t && sudo docker run -it --ipc=host -v "$(pwd)"/datasets:/usr/src/datasets $t
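A quick way to smoke-test the resulting image (an illustrative sketch, not part of this commit) is to confirm that the conda-installed stack imports cleanly, since a missing `mkl` is exactly what triggers the `libmkl_intel_lp64.so.2` error noted above:

```python
# Illustrative smoke test; run inside the image, e.g.:
#   docker run -it ultralytics/ultralytics:latest-conda python
import torch        # a missing mkl typically surfaces here as the libmkl_intel_lp64.so.2 OSError
import ultralytics

print(torch.__version__, ultralytics.__version__)
ultralytics.checks()  # prints a short environment summary (Python, torch, CUDA, memory, disk)
```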

@@ -37,6 +37,7 @@ RUN yolo export model=tmp/yolov8n.pt format=ncnn imgsz=32
 # Remove exported models
 RUN rm -rf tmp
 
 # Usage Examples -------------------------------------------------------------------------------------------------------
 
 # Build and Push

@@ -37,6 +37,7 @@ RUN pip install --no-cache paddlepaddle==2.4.2 x2paddle
 # Remove exported models
 RUN rm -rf tmp
 
 # Usage Examples -------------------------------------------------------------------------------------------------------
 
 # Build and Push

@@ -12,7 +12,7 @@ Welcome to the Ultralytics Integrations page! This page provides an overview of
 ## Datasets Integrations
 
-- [Roboflow](/integrations/roboflow/): Facilitate seamless dataset management for Ultralytics models, offering robust annotation, preprocessing, and augmentation capabilities.
+- [Roboflow](roboflow.md): Facilitate seamless dataset management for Ultralytics models, offering robust annotation, preprocessing, and augmentation capabilities.
 
 ## Training Integrations

@@ -20,7 +20,7 @@ Ultralytics provides various installation methods including pip, conda, and Dock
     pip install ultralytics
     ```
 
-    You can also install the `ultralytics` package directly from the GitHub repository. This might be useful if you want the latest development version. Make sure to have the Git command-line tool installed on your system. The `@main` command installs the `main` branch and may be modified to another branch, i.e. `@my-branch`, or removed altogether to default to the `main` branch.
+    You can also install the `ultralytics` package directly from the GitHub [repository](https://github.com/ultralytics/ultralytics). This might be useful if you want the latest development version. Make sure to have the Git command-line tool installed on your system. The `@main` command installs the `main` branch and may be modified to another branch, i.e. `@my-branch`, or removed altogether to default to the `main` branch.
 
     ```bash
     # Install the ultralytics package from GitHub
@@ -44,7 +44,23 @@ Ultralytics provides various installation methods including pip, conda, and Dock
     If you are installing in a CUDA environment best practice is to install `ultralytics`, `pytorch` and `pytorch-cuda` in the same command to allow the conda package manager to resolve any conflicts, or else to install `pytorch-cuda` last to allow it to override the CPU-specific `pytorch` package if necessary.
 
     ```bash
     # Install all packages together using conda
-    conda install -c conda-forge -c pytorch -c nvidia ultralytics pytorch torchvision pytorch-cuda=11.8
+    conda install -c pytorch -c nvidia -c conda-forge pytorch torchvision pytorch-cuda=11.8 ultralytics
     ```
+
+    ### Conda Docker Image
+
+    Ultralytics Conda Docker images are also available from [DockerHub](https://hub.docker.com/r/ultralytics/ultralytics). These images are based on [Miniconda3](https://docs.conda.io/projects/miniconda/en/latest/) and are a simple way to start using `ultralytics` in a Conda environment.
+
+    ```bash
+    # Set image name as a variable
+    t=ultralytics/ultralytics:latest-conda
+
+    # Pull the latest ultralytics image from Docker Hub
+    sudo docker pull $t
+
+    # Run the ultralytics image in a container with GPU support
+    sudo docker run -it --ipc=host --gpus all $t  # all GPUs
+    sudo docker run -it --ipc=host --gpus '"device=2,3"' $t  # specify GPUs
+    ```
 
=== "Git clone" === "Git clone"
@@ -71,6 +87,7 @@ Ultralytics provides various installation methods including pip, conda, and Dock
     - **Dockerfile-cpu:** Ubuntu-based CPU-only version suitable for inference and environments without GPUs.
     - **Dockerfile-jetson:** Tailored for NVIDIA Jetson devices, integrating GPU support optimized for these platforms.
     - **Dockerfile-python:** Minimal image with just Python and necessary dependencies, ideal for lightweight applications and development.
+    - **Dockerfile-conda:** Based on Miniconda3 with a conda installation of the ultralytics package.
 
     Below are the commands to get the latest image and execute it:
@@ -109,8 +126,7 @@ See the `ultralytics` [requirements.txt](https://github.com/ultralytics/ultralyt
 ## Use Ultralytics with CLI
 
-The Ultralytics command line interface (CLI) allows for simple single-line commands without the need for a Python environment.
-CLI requires no customization or Python code. You can simply run all tasks from the terminal with the `yolo` command. Check out the [CLI Guide](usage/cli.md) to learn more about using YOLOv8 from the command line.
+The Ultralytics command line interface (CLI) allows for simple single-line commands without the need for a Python environment. CLI requires no customization or Python code. You can simply run all tasks from the terminal with the `yolo` command. Check out the [CLI Guide](usage/cli.md) to learn more about using YOLOv8 from the command line.
 
 !!! example
@@ -154,7 +154,8 @@ class Exporter:
         format = self.args.format.lower() # to lowercase
         if format in ('tensorrt', 'trt'): # 'engine' aliases
             format = 'engine'
-        if format in ('mlmodel', 'mlpackage', 'mlprogram', 'apple', 'ios'): # 'coreml' aliases
+        if format in ('mlmodel', 'mlpackage', 'mlprogram', 'apple', 'ios', 'coreml'): # 'coreml' aliases
+            os.environ['PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION'] = 'python' # fix attempt for protobuf<3.20.x errors
             format = 'coreml'
         fmts = tuple(export_formats()['Argument'][1:]) # available export formats
         flags = [x == format for x in fmts]
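Taken in isolation, the hunk above means any of the CoreML aliases now normalize to `'coreml'`, and the protobuf workaround is applied at that point. A standalone sketch of the same logic (the helper name is illustrative, not an Ultralytics API):

```python
import os

def normalize_export_format(fmt: str) -> str:
    """Re-statement of the alias handling above for illustration only (not library code)."""
    fmt = fmt.lower()
    if fmt in ('tensorrt', 'trt'):  # 'engine' aliases
        return 'engine'
    if fmt in ('mlmodel', 'mlpackage', 'mlprogram', 'apple', 'ios', 'coreml'):  # 'coreml' aliases
        os.environ['PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION'] = 'python'  # protobuf<3.20.x workaround
        return 'coreml'
    return fmt

print(normalize_export_format('mlpackage'))  # -> coreml
```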

@@ -24,7 +24,6 @@ from pathlib import Path
 import numpy as np
 import torch
-from scipy.optimize import linear_sum_assignment
 
 from ultralytics.cfg import get_cfg, get_save_dir
 from ultralytics.data.utils import check_cls_dataset, check_det_dataset
@@ -226,9 +225,11 @@ class BaseValidator:
         iou = iou.cpu().numpy()
         for i, threshold in enumerate(self.iouv.cpu().tolist()):
             if use_scipy:
+                # WARNING: known issue that reduces mAP in https://github.com/ultralytics/ultralytics/pull/4708
+                import scipy # scope import to avoid importing for all commands
                 cost_matrix = iou * (iou >= threshold)
                 if cost_matrix.any():
-                    labels_idx, detections_idx = linear_sum_assignment(cost_matrix, maximize=True)
+                    labels_idx, detections_idx = scipy.optimize.linear_sum_assignment(cost_matrix, maximize=True)
                     valid = cost_matrix[labels_idx, detections_idx] > 0
                     if valid.any():
                         correct[detections_idx[valid], i] = True
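As a self-contained toy example of what this scipy path computes (an editor sketch using an explicit `import scipy.optimize`), the assignment below matches each ground truth to at most one detection while maximizing total IoU among pairs above the threshold:

```python
import numpy as np
import scipy.optimize

iou = np.array([[0.80, 0.10, 0.00],    # rows: ground-truth boxes
                [0.05, 0.60, 0.55]])   # columns: detections
threshold = 0.50
correct = np.zeros(iou.shape[1], dtype=bool)

cost_matrix = iou * (iou >= threshold)  # zero out pairs below the IoU threshold
if cost_matrix.any():
    # maximize=True finds the one-to-one assignment with the highest total IoU
    labels_idx, detections_idx = scipy.optimize.linear_sum_assignment(cost_matrix, maximize=True)
    valid = cost_matrix[labels_idx, detections_idx] > 0  # drop zero-cost (sub-threshold) pairs
    correct[detections_idx[valid]] = True

print(correct)  # -> [ True  True False]
```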

@@ -14,11 +14,11 @@ class HungarianMatcher(nn.Module):
     A module implementing the HungarianMatcher, which is a differentiable module to solve the assignment problem in
     an end-to-end fashion.
 
-    HungarianMatcher performs optimal assignment over predicted and ground truth bounding boxes using a cost function
-    that considers classification scores, bounding box coordinates, and optionally, mask predictions.
+    HungarianMatcher performs optimal assignment over the predicted and ground truth bounding boxes using a cost
+    function that considers classification scores, bounding box coordinates, and optionally, mask predictions.
 
     Attributes:
-        cost_gain (dict): Dictionary of cost coefficients for different components: 'class', 'bbox', 'giou', 'mask', and 'dice'.
+        cost_gain (dict): Dictionary of cost coefficients: 'class', 'bbox', 'giou', 'mask', and 'dice'.
         use_fl (bool): Indicates whether to use Focal Loss for the classification cost calculation.
         with_mask (bool): Indicates whether the model makes mask predictions.
         num_sample_points (int): The number of sample points used in mask cost calculation.
@@ -26,8 +26,8 @@ class HungarianMatcher(nn.Module):
         gamma (float): The gamma factor in Focal Loss calculation.
 
     Methods:
-        forward(pred_bboxes, pred_scores, gt_bboxes, gt_cls, gt_groups, masks=None, gt_mask=None): Computes the assignment
-            between predictions and ground truths for a batch.
+        forward(pred_bboxes, pred_scores, gt_bboxes, gt_cls, gt_groups, masks=None, gt_mask=None): Computes the
+            assignment between predictions and ground truths for a batch.
         _cost_mask(bs, num_gts, masks=None, gt_mask=None): Computes the mask cost and dice cost if masks are predicted.
     """