Add Quickstart Docs YouTube video (#5733)

Signed-off-by: Glenn Jocher <glenn.jocher@ultralytics.com>
Author: Glenn Jocher, committed by GitHub on 2023-10-16 12:17:16 +02:00
parent cedce60f8c
commit 437b4306d2
3 changed files with 24 additions and 15 deletions


@@ -118,6 +118,17 @@ Ultralytics provides various installation methods including pip, conda, and Docker.
 See the `ultralytics` [requirements.txt](https://github.com/ultralytics/ultralytics/blob/main/requirements.txt) file for a list of dependencies. Note that all examples above install all required dependencies.
 
+<p align="center">
+  <br>
+  <iframe width="720" height="405" src="https://www.youtube.com/embed/MWq1UxqTClU?si=nHAW-lYDzrz68jR0"
+    title="YouTube video player" frameborder="0"
+    allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share"
+    allowfullscreen>
+  </iframe>
+  <br>
+  <strong>Watch:</strong> Ultralytics YOLO for Object Detection: Quickstart Guide for Installation and Setup.
+</p>
+
 !!! tip "Tip"
 
     PyTorch requirements vary by operating system and CUDA requirements, so it's recommended to install PyTorch first following instructions at [https://pytorch.org/get-started/locally](https://pytorch.org/get-started/locally).
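
To confirm an installation succeeded before moving on, the package ships a built-in environment check; a minimal sketch:

```python
# Print environment info (Python, torch, CUDA availability) to verify
# that `ultralytics` and its dependencies installed correctly.
import ultralytics

ultralytics.checks()
```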


@@ -61,15 +61,14 @@ class SAM(Model):
         Performs segmentation prediction on the given image or video source.
 
         Args:
-            source: Path to the image or video file, or a PIL.Image object, or a numpy.ndarray object.
+            source (str): Path to the image or video file, or a PIL.Image object, or a numpy.ndarray object.
             stream (bool, optional): If True, enables real-time streaming. Defaults to False.
             bboxes (list, optional): List of bounding box coordinates for prompted segmentation. Defaults to None.
             points (list, optional): List of points for prompted segmentation. Defaults to None.
             labels (list, optional): List of labels for prompted segmentation. Defaults to None.
-            **kwargs: Additional keyword arguments.
 
         Returns:
-            The segmentation masks.
+            (list): The model predictions.
         """
         overrides = dict(conf=0.25, task='segment', mode='predict', imgsz=1024)
         kwargs.update(overrides)
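
For context, a minimal usage sketch of the `predict` signature documented above; the weights filename, image path, and prompt values are illustrative only:

```python
from ultralytics import SAM

model = SAM('sam_b.pt')  # illustrative SAM checkpoint name

# Prompted segmentation with one bounding box (xyxy pixel coordinates).
results = model.predict('image.jpg', bboxes=[100, 100, 400, 400])

# Prompted segmentation with a single foreground point (label 1 = foreground).
results = model.predict('image.jpg', points=[300, 250], labels=[1])
```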
@@ -81,15 +80,14 @@ class SAM(Model):
         Alias for the 'predict' method.
 
         Args:
-            source: Path to the image or video file, or a PIL.Image object, or a numpy.ndarray object.
+            source (str): Path to the image or video file, or a PIL.Image object, or a numpy.ndarray object.
             stream (bool, optional): If True, enables real-time streaming. Defaults to False.
             bboxes (list, optional): List of bounding box coordinates for prompted segmentation. Defaults to None.
             points (list, optional): List of points for prompted segmentation. Defaults to None.
             labels (list, optional): List of labels for prompted segmentation. Defaults to None.
-            **kwargs: Additional keyword arguments.
 
         Returns:
-            The segmentation masks.
+            (list): The model predictions.
         """
         return self.predict(source, stream, bboxes, points, labels, **kwargs)
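
Since `__call__` simply forwards to `predict`, the two call styles below are interchangeable (continuing the illustrative sketch above):

```python
# Equivalent invocations: __call__ is an alias for predict().
results = model.predict('image.jpg')
results = model('image.jpg')
```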
@@ -112,6 +110,6 @@ class SAM(Model):
         Provides a mapping from the 'segment' task to its corresponding 'Predictor'.
 
         Returns:
-            dict: A dictionary mapping the 'segment' task to its corresponding 'Predictor'.
+            (dict): A dictionary mapping the 'segment' task to its corresponding 'Predictor'.
         """
         return {'segment': {'predictor': Predictor}}
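
A hedged sketch of how a task map like this is typically consumed; the `load_component` helper and the stand-in `Predictor` class below are hypothetical, for illustration only:

```python
class Predictor:  # hypothetical stand-in for the SAM Predictor in this diff
    pass

# Task-to-component registry, mirroring the return value above.
task_map = {'segment': {'predictor': Predictor}}

def load_component(task: str, key: str):
    """Hypothetical lookup: fetch the class registered for a task."""
    return task_map[task][key]

predictor_cls = load_component('segment', 'predictor')  # -> Predictor
```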


@@ -77,7 +77,7 @@ class Predictor(BasePredictor):
             im (torch.Tensor | List[np.ndarray]): BCHW tensor format or list of HWC numpy arrays.
 
         Returns:
-            torch.Tensor: The preprocessed image tensor.
+            (torch.Tensor): The preprocessed image tensor.
         """
         if self.im is not None:
             return self.im
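
For readers unfamiliar with the layout terms above, a minimal sketch of the HWC-to-BCHW conversion (not the library's actual `preprocess` implementation):

```python
import numpy as np
import torch

im = np.zeros((1024, 1024, 3), dtype=np.uint8)  # HWC: height, width, channels
tensor = torch.from_numpy(im).permute(2, 0, 1)  # CHW: channels first
tensor = tensor.unsqueeze(0).float() / 255.0    # BCHW: add batch dim, scale to [0, 1]
```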
@@ -105,7 +105,7 @@ class Predictor(BasePredictor):
             im (List[np.ndarray]): List containing images in HWC numpy array format.
 
         Returns:
-            List[np.ndarray]: List of transformed images.
+            (List[np.ndarray]): List of transformed images.
         """
         assert len(im) == 1, 'SAM model does not currently support batched inference'
         letterbox = LetterBox(self.args.imgsz, auto=False, center=False)
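
`LetterBox` resizes an image to the target size while padding to preserve aspect ratio; a small sketch of applying the transform shown above (import path and keyword call assumed from the `ultralytics` data-augmentation API):

```python
import numpy as np
from ultralytics.data.augment import LetterBox  # import path assumed

img = np.zeros((480, 640, 3), dtype=np.uint8)  # HWC input frame
letterbox = LetterBox(1024, auto=False, center=False)
resized = letterbox(image=img)  # padded to 1024x1024, still HWC
```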
@@ -126,7 +126,7 @@ class Predictor(BasePredictor):
             multimask_output (bool, optional): Flag to return multiple masks. Helpful for ambiguous prompts. Defaults to False.
 
         Returns:
-            tuple: Contains the following three elements.
+            (tuple): Contains the following three elements.
                 - np.ndarray: The output masks in shape CxHxW, where C is the number of generated masks.
                 - np.ndarray: An array of length C containing quality scores predicted by the model for each mask.
                 - np.ndarray: Low-resolution logits of shape CxHxW for subsequent inference, where H=W=256.
@@ -155,7 +155,7 @@ class Predictor(BasePredictor):
             multimask_output (bool, optional): Flag to return multiple masks. Helpful for ambiguous prompts. Defaults to False.
 
         Returns:
-            tuple: Contains the following three elements.
+            (tuple): Contains the following three elements.
                 - np.ndarray: The output masks in shape CxHxW, where C is the number of generated masks.
                 - np.ndarray: An array of length C containing quality scores predicted by the model for each mask.
                 - np.ndarray: Low-resolution logits of shape CxHxW for subsequent inference, where H=W=256.
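
The three-element return documented in both hunks above unpacks into masks, per-mask scores, and reusable low-resolution logits; a runnable sketch with placeholder arrays of the documented shapes:

```python
import numpy as np

C, H, W = 3, 1024, 1024                   # C generated masks of HxW
masks = np.zeros((C, H, W), dtype=bool)   # output masks, CxHxW
scores = np.array([0.7, 0.9, 0.5])        # quality score per mask
low_res_logits = np.zeros((C, 256, 256))  # low-res logits, H=W=256

# A common way to resolve ambiguous prompts: keep the highest-scoring mask.
best_mask = masks[scores.argmax()]
```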
@@ -234,7 +234,7 @@ class Predictor(BasePredictor):
             crop_nms_thresh (float): IoU cutoff for Non-Maximum Suppression (NMS) to remove duplicate masks between crops.
 
         Returns:
-            tuple: A tuple containing segmented masks, confidence scores, and bounding boxes.
+            (tuple): A tuple containing segmented masks, confidence scores, and bounding boxes.
         """
         self.segment_all = True
         ih, iw = im.shape[2:]
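
`crop_nms_thresh` drives a standard IoU-based NMS across boxes coming from overlapping crops; a minimal sketch using `torchvision.ops.nms` with illustrative boxes and scores:

```python
import torch
from torchvision.ops import nms

# Boxes (xyxy) from two overlapping crops; the first two are near-duplicates.
boxes = torch.tensor([[10., 10., 110., 110.],
                      [12., 11., 112., 109.],  # duplicate from a second crop
                      [200., 200., 300., 300.]])
scores = torch.tensor([0.90, 0.80, 0.95])

keep = nms(boxes, scores, iou_threshold=0.7)  # analogous to crop_nms_thresh
print(keep)  # tensor([2, 0]): the near-duplicate (index 1) is suppressed
```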
@@ -434,7 +434,7 @@ class Predictor(BasePredictor):
             nms_thresh (float): The IoU threshold for the NMS algorithm. Defaults to 0.7.
 
         Returns:
-            Tuple[torch.Tensor, List[int]]:
+            (tuple([torch.Tensor, List[int]])):
                 - new_masks (torch.Tensor): The processed masks with small regions removed. Shape is (N, H, W).
                 - keep (List[int]): The indices of the remaining masks post-NMS, which can be used to filter the boxes.