A quick intro to using the pre-trained model to detect and segment objects.
import os
import sys
import random
import math
import numpy as np
import skimage.io
import matplotlib
import matplotlib.pyplot as plt

# Root directory of the project
ROOT_DIR = os.path.abspath("../")

# Import Mask RCNN
sys.path.append(ROOT_DIR)  # To find local version of the library
from mrcnn import utils
import mrcnn.model as modellib
from mrcnn import visualize
# Import COCO config
sys.path.append(os.path.join(ROOT_DIR, "samples/coco/"))  # To find local version
import coco

%matplotlib inline

# Directory to save logs and trained model
MODEL_DIR = os.path.join(ROOT_DIR, "logs")

# Local path to trained weights file
COCO_MODEL_PATH = os.path.join(ROOT_DIR, "mask_rcnn_coco.h5")
# Download COCO trained weights from Releases if needed
if not os.path.exists(COCO_MODEL_PATH):
    utils.download_trained_weights(COCO_MODEL_PATH)

# Directory of images to run detection on
IMAGE_DIR = os.path.join(ROOT_DIR, "images")
Configurations
We’ll be using a model trained on the MS-COCO dataset. The configurations of this model are in the CocoConfig class in coco.py.
For inference, modify the configuration slightly to fit the task. To do so, sub-class the CocoConfig class and override the attributes you need to change.
class InferenceConfig(coco.CocoConfig):
    # Set batch size to 1 since we'll be running inference on
    # one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU
    GPU_COUNT = 1
    IMAGES_PER_GPU = 1

config = InferenceConfig()
config.display()
# Create model object in inference mode.
model = modellib.MaskRCNN(mode="inference", model_dir=MODEL_DIR, config=config)

# Load weights trained on MS-COCO
model.load_weights(COCO_MODEL_PATH, by_name=True)
Class Names
The model classifies objects and returns class IDs, which are integer values that identify each class. Some datasets assign integer values to their classes and some don’t. For example, in the MS-COCO dataset, the ‘person’ class is 1 and ‘teddy bear’ is 88. The IDs are often sequential, but not always. The COCO dataset, for example, has classes associated with class IDs 70 and 72, but not 71.
To improve consistency, and to support training on data from multiple sources at the same time, our Dataset class assigns its own sequential integer IDs to each class. For example, if you load the COCO dataset using our Dataset class, the ‘person’ class would get class ID = 1 (just like COCO) and the ‘teddy bear’ class would be 78 (different from COCO). Keep that in mind when mapping class IDs to class names.
To get the list of class names, you’d load the dataset and then use the class_names property like this.
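For reference, here is a minimal sketch of that workflow. It assumes you have a local copy of the COCO images and annotations; COCO_DIR below is a placeholder path, not something defined earlier in this demo.

# Sketch only: COCO_DIR is a placeholder path to a local download of the
# COCO images and annotations. Adjust it to your own setup.
COCO_DIR = os.path.join(ROOT_DIR, "data/coco")

# Load the training subset of COCO through the Dataset class
dataset = coco.CocoDataset()
dataset.load_coco(COCO_DIR, "train")
dataset.prepare()

# The prepared dataset exposes the remapped, sequential class names
print(dataset.class_names)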
We don’t want to require you to download the COCO dataset just to run this demo, so we’re including the list of class names below. The index of a class name in the list represents its ID (the first class is 0, the second is 1, the third is 2, and so on).
# COCO Class names
# Index of the class in the list is its ID. For example, to get ID of
# the teddy bear class, use: class_names.index('teddy bear')
class_names = ['BG', 'person', 'bicycle', 'car', 'motorcycle', 'airplane',
               'bus', 'train', 'truck', 'boat', 'traffic light',
               'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird',
               'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear',
               'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie',
               'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball',
               'kite', 'baseball bat', 'baseball glove', 'skateboard',
               'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup',
               'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
               'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza',
               'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed',
               'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
               'keyboard', 'cell phone', 'microwave', 'oven', 'toaster',
               'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors',
               'teddy bear', 'hair drier', 'toothbrush']
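As a quick sanity check, the indices in this list line up with the sequential IDs described above:

print(class_names.index('person'))      # 1
print(class_names.index('teddy bear'))  # 78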
Run Object Detection
# Load a random image from the images folder
file_names = next(os.walk(IMAGE_DIR))[2]
image = skimage.io.imread(os.path.join(IMAGE_DIR, random.choice(file_names)))

# Run detection
results = model.detect([image], verbose=1)

# Visualize results
r = results[0]
visualize.display_instances(image, r['rois'], r['masks'], r['class_ids'],
                            class_names, r['scores'])
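The visualization is usually all you need for a demo, but the results are also easy to inspect programmatically. Each entry returned by model.detect is a dictionary of NumPy arrays; here is a minimal sketch that prints the detected classes and scores from the first result.

# Each result dictionary holds parallel arrays, one entry per detected object:
#   'rois'      - [N, (y1, x1, y2, x2)] bounding boxes
#   'class_ids' - [N] integer class IDs (indices into class_names)
#   'scores'    - [N] confidence scores
#   'masks'     - [H, W, N] boolean instance masks
for class_id, score in zip(r['class_ids'], r['scores']):
    print("{:20s} {:.3f}".format(class_names[class_id], score))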