Commit 3fe5d80

[Conformance] Ultralytics yolov8n and yolo11n

1 parent faa6bed

File tree

4 files changed: +223 -0

tests/post_training/data/ptq_reference_data.yaml

Lines changed: 12 additions & 0 deletions

@@ -102,6 +102,18 @@ torchvision/swin_v2_s_backend_CUDA_FX_TORCH:
     type: "OpConversionFailure"
     error_message: "Check 'is_conversion_successful' failed at src/frontends/pytorch/src/frontend.cpp:171:\nFrontEnd API failed with OpConversionFailure:\nModel wasn't fully converted.\nSummary:\n-- No conversion rule found for operations: aten.adaptive_avg_pool2d.default, aten.conv2d.default, aten.layer_norm.default, aten.linear.default, aten.matmul.default, aten.pad.default, aten.softmax.int, aten.where.ScalarSelf\n"
     message: "Issue-162009"
+ultralytics/yolov8n_backend_FP32:
+  metric_value: 0.6056
+ultralytics/yolov8n_backend_FX_TORCH:
+  metric_value: 0.61417
+ultralytics/yolov8n_backend_OV:
+  metric_value: 0.6188
+ultralytics/yolo11n_backend_FP32:
+  metric_value: 0.6770
+ultralytics/yolo11n_backend_FX_TORCH:
+  metric_value: 0.6735
+ultralytics/yolo11n_backend_OV:
+  metric_value: 0.6752
 timm/crossvit_9_240_backend_CUDA_TORCH:
   metric_value: 0.7275
 timm/crossvit_9_240_backend_FP32:
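
For context, these entries record the FP32 and quantized mAP50(B) baselines that each conformance run is compared against (the metric name is set in _validate() of the new pipeline below). A minimal sketch of the kind of lookup these entries back, assuming the suite loads the YAML into a plain dict and tolerates a small absolute drift; the suite's actual tolerance handling may differ:

import math

import yaml

with open("tests/post_training/data/ptq_reference_data.yaml") as f:
    reference = yaml.safe_load(f)

measured = 0.6187  # hypothetical result of a fresh ultralytics/yolov8n OV run
expected = reference["ultralytics/yolov8n_backend_OV"]["metric_value"]
assert math.isclose(measured, expected, abs_tol=0.002), (measured, expected)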

tests/post_training/model_scope.py

Lines changed: 80 additions & 0 deletions

@@ -32,6 +32,7 @@
 from tests.post_training.pipelines.image_classification_torchvision import ImageClassificationTorchvision
 from tests.post_training.pipelines.lm_weight_compression import LMWeightCompression
 from tests.post_training.pipelines.masked_language_modeling import MaskedLanguageModelingHF
+from tests.post_training.pipelines.ultralytics_detection import UltralyticsDetection

 QUANTIZATION_MODELS = [
     # HF models
@@ -133,6 +134,85 @@
         "backends": [BackendType.FX_TORCH, BackendType.CUDA_FX_TORCH, BackendType.OV],
         "batch_size": 1,
     },
+    # Ultralytics models
+    {
+        "reported_name": "ultralytics/yolov8n",
+        "model_id": "yolov8n",
+        "pipeline_cls": UltralyticsDetection,
+        "compression_params": {
+            "preset": nncf.QuantizationPreset.MIXED,
+            "ignored_scope": nncf.IgnoredScope(
+                types=["mul", "sub", "sigmoid", "__getitem__"],
+                subgraphs=[
+                    nncf.Subgraph(
+                        inputs=["cat_13", "cat_14", "cat_15"],
+                        outputs=["output"],
+                    )
+                ],
+            ),
+        },
+        "backends": [BackendType.FX_TORCH],
+        "batch_size": 1,
+    },
+    {
+        "reported_name": "ultralytics/yolov8n",
+        "model_id": "yolov8n",
+        "pipeline_cls": UltralyticsDetection,
+        "compression_params": {
+            "preset": QuantizationPreset.MIXED,
+            "ignored_scope": nncf.IgnoredScope(
+                types=["Multiply", "Subtract", "Sigmoid"],
+                subgraphs=[
+                    nncf.Subgraph(
+                        inputs=["/model.22/Concat", "/model.22/Concat_1", "/model.22/Concat_2"],
+                        outputs=["output0/sink_port_0"],
+                    )
+                ],
+            ),
+        },
+        "backends": [BackendType.OV],
+        "batch_size": 1,
+    },
+    {
+        "reported_name": "ultralytics/yolo11n",
+        "model_id": "yolo11n",
+        "pipeline_cls": UltralyticsDetection,
+        "compression_params": {
+            "model_type": nncf.ModelType.TRANSFORMER,
+            "preset": QuantizationPreset.MIXED,
+            "ignored_scope": nncf.IgnoredScope(
+                types=["mul", "sub", "sigmoid", "__getitem__"],
+                subgraphs=[
+                    nncf.Subgraph(
+                        inputs=["cat_13", "cat_14", "cat_15"],
+                        outputs=["output"],
+                    )
+                ],
+            ),
+        },
+        "backends": [BackendType.FX_TORCH],
+        "batch_size": 1,
+    },
+    {
+        "reported_name": "ultralytics/yolo11n",
+        "model_id": "yolo11n",
+        "pipeline_cls": UltralyticsDetection,
+        "compression_params": {
+            "model_type": nncf.ModelType.TRANSFORMER,
+            "preset": QuantizationPreset.MIXED,
+            "ignored_scope": nncf.IgnoredScope(
+                types=["Multiply", "Subtract", "Sigmoid"],
+                subgraphs=[
+                    nncf.Subgraph(
+                        inputs=["/model.23/Concat", "/model.23/Concat_1", "/model.23/Concat_2"],
+                        outputs=["output0/sink_port_0"],
+                    )
+                ],
+            ),
+        },
+        "backends": [BackendType.OV],
+        "batch_size": 1,
+    },
     # Timm models
     {
         "reported_name": "timm/crossvit_9_240",
tests/post_training/pipelines/ultralytics_detection.py

Lines changed: 130 additions & 0 deletions

@@ -0,0 +1,130 @@
+# Copyright (c) 2025 Intel Corporation
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#      http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from pathlib import Path
+from typing import Dict, Tuple
+
+import openvino as ov
+import torch
+from ultralytics import YOLO
+from ultralytics.data.utils import check_det_dataset
+from ultralytics.engine.validator import BaseValidator as Validator
+from ultralytics.utils.torch_utils import de_parallel
+
+import nncf
+from nncf.torch import disable_patching
+from tests.post_training.pipelines.base import OV_BACKENDS
+from tests.post_training.pipelines.base import BackendType
+from tests.post_training.pipelines.base import PTQTestPipeline
+
+
+class UltralyticsDetection(PTQTestPipeline):
+    """Pipeline for YOLO detection models from the Ultralytics repository."""
+
+    def prepare_model(self) -> None:
+        if self.batch_size != 1:
+            msg = "Batch size > 1 is not supported"
+            raise RuntimeError(msg)
+
+        model_path = f"{self.fp32_model_dir}/{self.model_id}"
+        yolo = YOLO(f"{model_path}.pt")
+        self.validator, self.data_loader = self._prepare_validation(yolo, "coco128.yaml")
+        self.dummy_tensor = torch.ones((1, 3, 640, 640))
+
+        if self.backend in OV_BACKENDS + [BackendType.FP32]:
+            onnx_model_path = Path(f"{model_path}.onnx")
+            ir_model_path = self.fp32_model_dir / "model_fp32.xml"
+            yolo.export(format="onnx", dynamic=True, half=False)
+            ov.save_model(ov.convert_model(onnx_model_path), ir_model_path)
+            self.model = ov.Core().read_model(ir_model_path)
+
+        if self.backend == BackendType.FX_TORCH:
+            pt_model = yolo.model
+            # Run the model once to initialize all
+            # internal variables
+            pt_model(self.dummy_tensor)
+
+            with torch.no_grad():
+                with disable_patching():
+                    self.model = torch.export.export(pt_model, args=(self.dummy_tensor,), strict=False).module()
+
+    def prepare_preprocessor(self) -> None:
+        pass
+
+    @staticmethod
+    def _validate_fx(
+        model: torch.fx.GraphModule, data_loader: torch.utils.data.DataLoader, validator: Validator, num_samples: int = None
+    ) -> Tuple[Dict, int, int]:
+        compiled_model = torch.compile(model, backend="openvino")
+        for batch_i, batch in enumerate(data_loader):
+            if num_samples is not None and batch_i == num_samples:
+                break
+            batch = validator.preprocess(batch)
+            preds = compiled_model(batch["img"])
+            preds = validator.postprocess(preds)
+            validator.update_metrics(preds, batch)
+        stats = validator.get_stats()
+        return stats, validator.seen, validator.nt_per_class.sum()
+
+    @staticmethod
+    def _validate_ov(
+        model: ov.Model, data_loader: torch.utils.data.DataLoader, validator: Validator, num_samples: int = None
+    ) -> Tuple[Dict, int, int]:
+        model.reshape({0: [1, 3, -1, -1]})
+        compiled_model = ov.compile_model(model)
+        output_layer = compiled_model.output(0)
+        for batch_i, batch in enumerate(data_loader):
+            if num_samples is not None and batch_i == num_samples:
+                break
+            batch = validator.preprocess(batch)
+            preds = torch.from_numpy(compiled_model(batch["img"])[output_layer])
+            preds = validator.postprocess(preds)
+            validator.update_metrics(preds, batch)
+        stats = validator.get_stats()
+        return stats, validator.seen, validator.nt_per_class.sum()
+
+    def get_transform_calibration_fn(self):
+        def transform_func(batch):
+            return self.validator.preprocess(batch)["img"]
+
+        return transform_func
+
+    def prepare_calibration_dataset(self):
+        self.calibration_dataset = nncf.Dataset(self.data_loader, self.get_transform_calibration_fn())
+
+    @staticmethod
+    def _prepare_validation(model: YOLO, data: str) -> Tuple[Validator, torch.utils.data.DataLoader]:
+        custom = {"rect": False, "batch": 1}  # method defaults
+        args = {**model.overrides, **custom, "mode": "val"}  # highest priority args on the right
+
+        validator = model._smart_load("validator")(args=args, _callbacks=model.callbacks)
+        stride = 32  # default stride
+        validator.stride = stride  # used in get_dataloader() for padding
+        validator.data = check_det_dataset(data)
+        validator.init_metrics(de_parallel(model))
+
+        data_loader = validator.get_dataloader(validator.data.get(validator.args.split), validator.args.batch)
+
+        return validator, data_loader
+
+    def _validate(self):
+        if self.backend == BackendType.FP32:
+            stats, _, _ = self._validate_ov(self.model, self.data_loader, self.validator)
+        elif self.backend in OV_BACKENDS:
+            stats, _, _ = self._validate_ov(self.compressed_model, self.data_loader, self.validator)
+        elif self.backend == BackendType.FX_TORCH:
+            stats, _, _ = self._validate_fx(self.compressed_model, self.data_loader, self.validator)
+        else:
+            msg = f"Backend {self.backend} is not supported in UltralyticsDetection"
+            raise RuntimeError(msg)
+
+        self.run_info.metric_name = "mAP50(B)"
+        self.run_info.metric_value = stats["metrics/mAP50(B)"]
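
The two validation helpers above are static methods, so they can be exercised outside the test harness. A hedged standalone sketch, assuming yolov8n.pt and the coco128 dataset are available locally and that model_fp32.xml was exported as in prepare_model():

import openvino as ov
from ultralytics import YOLO

from tests.post_training.pipelines.ultralytics_detection import UltralyticsDetection

yolo = YOLO("yolov8n.pt")
validator, data_loader = UltralyticsDetection._prepare_validation(yolo, "coco128.yaml")

ov_model = ov.Core().read_model("model_fp32.xml")
# Cap at 32 batches to keep the smoke run short.
stats, seen, total_objects = UltralyticsDetection._validate_ov(ov_model, data_loader, validator, num_samples=32)
print(stats["metrics/mAP50(B)"], seen, total_objects)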

tests/post_training/requirements.txt

Lines changed: 1 addition & 0 deletions

@@ -21,3 +21,4 @@ accelerate==1.1.0
 transformers==4.48.3
 whowhatbench @ git+https://github.com/openvinotoolkit/openvino.genai.git@2025.0.0.0#subdirectory=tools/who_what_benchmark
 datasets==3.1.0
+ultralytics==8.3.27
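
To pick up the pinned dependency locally, the usual install applies (run from the repository root):

pip install -r tests/post_training/requirements.txt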
