16 changes: 7 additions & 9 deletions .readthedocs.yml
@@ -1,14 +1,12 @@
version: 2

build:
os: ubuntu-22.04
tools:
python: "3.8"

formats:
- epub
sphinx:
configuration: docs/conf.py

python:
version: 3.8
install:
- requirements: requirements/docs.txt
- requirements: requirements/readthedocs.txt
- method: pip
path: .
extra_requirements:
- docs
14 changes: 14 additions & 0 deletions app.py
@@ -0,0 +1,14 @@
# Run with: streamlit run app.py
import streamlit as st
import mmcv
from mmdet.apis import init_detector, inference_detector

st.title("Industrial Defect Detector")

uploaded_file = st.file_uploader("Choose an image...", type=["jpg", "jpeg", "png"])

if uploaded_file:
    # Streamlit gives a file-like object; decode its bytes into a BGR array
    # (mmcv.imread expects a file path or ndarray, not a file object).
    image = mmcv.imfrombytes(uploaded_file.read())

    # Load the trained detector, run inference, and save a visualisation
    # of the predicted defect boxes.
    model = init_detector('configs/defect/faster-rcnn_r50_fpn_1x_neu.py',
                          'checkpoints/latest.pth')
    result = inference_detector(model, image)
    model.show_result(image, result, out_file='output.jpg')
    st.image('output.jpg')
25 changes: 25 additions & 0 deletions configs/defect/README.md
@@ -0,0 +1,25 @@
# Industrial Surface Defect Detection (NEU)

This folder contains a complete example of training and testing a surface defect detection model on the [NEU Surface Defect Database](https://faculty.neu.edu.cn/yunhyan/NEU_surface_defect_database.html).

---

## What’s included

- **`faster-rcnn_r50_fpn_1x_neu.py`** — Config file that adapts MMDetection’s standard Faster R-CNN to detect six types of surface defects (e.g., scratches, inclusions); a quick sanity check for it follows this list.
- **`tools/convert_annotations.py`** — Utility script that helps convert the raw NEU images into COCO-style annotations for the train, val, and test splits.
- (Optional) **`app.py`** (at the repository root) — Example Streamlit app for testing the trained model interactively.
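
A quick way to check that the config resolves against the MMDetection `_base_` files and picks up all six defect classes (a minimal sketch, assuming MMDetection 2.x, which loads configs through `mmcv.Config`):

```python
from mmcv import Config

cfg = Config.fromfile('configs/defect/faster-rcnn_r50_fpn_1x_neu.py')
print(cfg.model.roi_head.bbox_head.num_classes)  # expected: 6
print(cfg.data.train.ann_file)                   # data/NEU/annotations/train.json
```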

---

## Why this is useful

Many factories still rely on manual visual inspection of surface quality. This example shows how to adapt MMDetection to a small, real-world manufacturing dataset, and it gives engineers and students a template for structuring their own quality-inspection projects.

---

## How to use

1. Download the NEU dataset and extract it under `data/NEU/`.

2. Split the images into `train/`, `val/`, and `test/` folders, then use `tools/convert_annotations.py` to generate COCO-style annotation files for them. The paths at the top of the script are hard-coded to `data/NEU/`, so edit them first if your layout differs, and run `python tools/convert_annotations.py`.
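
3. Train with the provided config using MMDetection’s standard training entry point, e.g. `python tools/train.py configs/defect/faster-rcnn_r50_fpn_1x_neu.py`.

4. Run a quick inference check on a single image. A minimal sketch, assuming MMDetection 2.x and that training wrote its checkpoint to the default `work_dirs/faster-rcnn_r50_fpn_1x_neu/latest.pth`; `test.jpg` stands in for any NEU image:

   ```python
   from mmdet.apis import init_detector, inference_detector

   config = 'configs/defect/faster-rcnn_r50_fpn_1x_neu.py'
   checkpoint = 'work_dirs/faster-rcnn_r50_fpn_1x_neu/latest.pth'

   # Build the detector and load the trained weights
   model = init_detector(config, checkpoint, device='cuda:0')

   # Detect defects and save a visualisation of the predicted boxes
   result = inference_detector(model, 'test.jpg')
   model.show_result('test.jpg', result, out_file='result.jpg')
   ```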
63 changes: 63 additions & 0 deletions configs/defect/faster-rcnn_r50_fpn_1x_neu.py
@@ -0,0 +1,63 @@
_base_ = [
'../_base_/models/faster_rcnn_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py',
'../_base_/default_runtime.py'
]

# --- Dataset settings ---
dataset_type = 'CocoDataset'
classes = ('crazing', 'inclusion', 'patches', 'pitted_surface', 'rolled-in_scale', 'scratch')

data_root = 'data/NEU/'

img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=True)

train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(512, 512), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]

data = dict(
train=dict(
type=dataset_type,
img_prefix=data_root + 'train/',
classes=classes,
ann_file=data_root + 'annotations/train.json',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
img_prefix=data_root + 'val/',
classes=classes,
ann_file=data_root + 'annotations/val.json'),
test=dict(
type=dataset_type,
img_prefix=data_root + 'test/',
classes=classes,
ann_file=data_root + 'annotations/test.json')
)

evaluation = dict(interval=1, metric='bbox')

# --- Model settings ---
model = dict(
roi_head=dict(
bbox_head=dict(
num_classes=len(classes)
)
)
)

# --- Optimizer ---
# lr=0.0025 matches single-GPU training under MMDetection's linear scaling rule
# (the stock schedule_1x uses lr=0.02 for 8 GPUs x 2 images per GPU).
optimizer = dict(type='SGD', lr=0.0025, momentum=0.9, weight_decay=0.0001)
lr_config = dict(step=[8, 11])  # LR decay epochs; policy and warmup inherited from schedule_1x
runner = dict(type='EpochBasedRunner', max_epochs=12)
70 changes: 70 additions & 0 deletions tools/convert_annotations.py
@@ -0,0 +1,70 @@
"""Generate COCO-style annotation files for the pre-split NEU train/val/test folders.

Note: create_dummy_annotation writes placeholder boxes; replace them with real
defect boxes before training.
"""
import os
import json
import random

from tqdm import tqdm

# Edit these paths
root_dir = "data/NEU/"
train_dir = os.path.join(root_dir, "train/")
val_dir = os.path.join(root_dir, "val/")
test_dir = os.path.join(root_dir, "test/")
save_dir = os.path.join(root_dir, "annotations/")

# Your classes
classes = ["crazing", "inclusion", "patches", "pitted_surface", "rolled-in_scale", "scratch"]

# Example: random dummy box for each image
def create_dummy_annotation(img_id, w, h):
return {
"id": img_id,
"image_id": img_id,
"category_id": random.randint(1, 6),
"bbox": [50, 50, w//2, h//2],
"area": (w//2) * (h//2),
"iscrowd": 0
}

def images_from_folder(folder, start_id=1):
files = os.listdir(folder)
images = []
annotations = []
img_id = start_id
for file in tqdm(files):
if not file.endswith(".jpg"):
continue
images.append({
"id": img_id,
"width": 200, # adjust
"height": 200, # adjust
"file_name": file
})
# Example: dummy box — replace with your real box!
annotations.append(create_dummy_annotation(img_id, 200, 200))
img_id += 1
return images, annotations

def make_json(images, annotations, output_file):
coco = {
"images": images,
"annotations": annotations,
"categories": [{"id": i+1, "name": name} for i, name in enumerate(classes)]
}
with open(output_file, "w") as f:
json.dump(coco, f, indent=4)
print(f"Saved: {output_file}")

# ---- Create folders ----
os.makedirs(save_dir, exist_ok=True)

# ---- Train ----
train_images, train_annots = images_from_folder(train_dir)
make_json(train_images, train_annots, os.path.join(save_dir, "train.json"))

# ---- Val ----
val_images, val_annots = images_from_folder(val_dir, start_id=len(train_images)+1)
make_json(val_images, val_annots, os.path.join(save_dir, "val.json"))

# ---- Test ----
test_images, test_annots = images_from_folder(test_dir, start_id=len(train_images)+len(val_images)+1)
make_json(test_images, test_annots, os.path.join(save_dir, "test.json"))