Mirror of https://github.com/THU-MIG/yolov10.git, synced 2025-05-22 21:04:21 +08:00
ultralytics 8.1.32 fix CLIP backwards compatibility (#9253)

Signed-off-by: Glenn Jocher <glenn.jocher@ultralytics.com>

parent 7d2e94bbe2
commit 6de99a29e6
@@ -69,7 +69,7 @@ def test_rtdetr(task="detect", model="yolov8n-rtdetr.yaml", data="coco8.yaml"):
     run(f"yolo predict {task} model={model} source={ASSETS / 'bus.jpg'} imgsz=160 save save_crop save_txt")


-@pytest.mark.skipif(checks.IS_PYTHON_3_12, reason="MobileSAM Clip is not supported in Python 3.12")
+@pytest.mark.skipif(checks.IS_PYTHON_3_12, reason="MobileSAM with CLIP is not supported in Python 3.12")
 def test_fastsam(task="segment", model=WEIGHTS_DIR / "FastSAM-s.pt", data="coco8-seg.yaml"):
     """Test FastSAM segmentation functionality within Ultralytics."""
     source = ASSETS / "bus.jpg"
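Only the wording of the skip reason changes in this hunk. For reference, a minimal self-contained sketch of the same gating pattern; IS_PYTHON_3_12 is recreated locally here as an assumption, since the ultralytics checks module is not part of this diff:

import sys

import pytest

IS_PYTHON_3_12 = sys.version_info[:2] == (3, 12)  # stand-in for checks.IS_PYTHON_3_12


@pytest.mark.skipif(IS_PYTHON_3_12, reason="MobileSAM with CLIP is not supported in Python 3.12")
def test_clip_dependent_feature():
    """Collected everywhere, but skipped at runtime on Python 3.12."""
    assert True  # placeholder body; the real test would exercise the CLIP path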
@@ -636,3 +636,10 @@ def test_model_embeddings():
     for batch in [SOURCE], [SOURCE, SOURCE]:  # test batch size 1 and 2
         assert len(model_detect.embed(source=batch, imgsz=32)) == len(batch)
         assert len(model_segment.embed(source=batch, imgsz=32)) == len(batch)
+
+
+@pytest.mark.skipif(checks.IS_PYTHON_3_12, reason="YOLOWorld with CLIP is not supported in Python 3.12")
+def test_yolo_world():
+    model = YOLO("yolov8s-world.pt")  # no YOLOv8n-world model yet
+    model.set_classes(["tree", "window"])
+    model(ASSETS / "bus.jpg", conf=0.01)
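The new test exercises open-vocabulary detection end to end. A minimal standalone sketch of the same flow, outside the test harness (the image path is illustrative):

from ultralytics import YOLO

# Load the YOLO-World weights the test uses; set_classes() tokenizes the
# prompt list with CLIP, which is why the test is skipped on Python 3.12.
model = YOLO("yolov8s-world.pt")
model.set_classes(["tree", "window"])
results = model("bus.jpg", conf=0.01)  # low conf to surface open-vocabulary hits
print(len(results[0].boxes))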
@@ -1,6 +1,6 @@
 # Ultralytics YOLO 🚀, AGPL-3.0 license

-__version__ = "8.1.31"
+__version__ = "8.1.32"

 from ultralytics.data.explorer.explorer import Explorer
 from ultralytics.models import RTDETR, SAM, YOLO, YOLOWorld
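The bump is what consumers see at runtime; a quick sanity check, assuming the new release is installed:

import ultralytics

print(ultralytics.__version__)  # "8.1.32" for this release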
@@ -572,7 +572,7 @@ class WorldModel(DetectionModel):
         check_requirements("git+https://github.com/openai/CLIP.git")
         import clip

-        if not self.clip_model:
+        if not getattr(self, "clip_model", None):  # for backwards compatibility of models lacking clip_model attribute
             self.clip_model = clip.load("ViT-B/32")[0]
         device = next(self.clip_model.parameters()).device
         text_token = clip.tokenize(text).to(device)
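This one-line guard is the actual fix. torch.load()/pickle restore a saved model's __dict__ without calling __init__, so a WorldModel checkpoint saved before clip_model was introduced has no such attribute at all, and `if not self.clip_model:` raises AttributeError. A minimal sketch with a toy stand-in class; names and bodies are illustrative, only the guard logic mirrors the diff:

class WorldModel:
    """Toy stand-in; only the attribute-guard logic mirrors the real class."""

    def __init__(self):
        self.clip_model = None  # attribute introduced in newer releases

    def set_classes_old(self, text):
        if not self.clip_model:  # AttributeError on pre-attribute checkpoints
            self.clip_model = f"CLIP loaded for {text}"

    def set_classes_new(self, text):
        if not getattr(self, "clip_model", None):  # tolerates the missing attribute
            self.clip_model = f"CLIP loaded for {text}"


# Mimic unpickling an old checkpoint: __init__ is never called, so the
# instance lacks clip_model entirely.
old = WorldModel.__new__(WorldModel)

try:
    old.set_classes_old(["tree", "window"])
except AttributeError as err:
    print(f"old guard fails: {err}")

old.set_classes_new(["tree", "window"])  # getattr() falls back to None
print(f"new guard works: {old.clip_model}")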