Before you deploy your model, you might want to run a test inference with the exported file to confirm that the export was successful. Here is how you can do it for DeepLab.
NOTE: I am running the code on Google Colab
- Select TensorFlow 1.x
%tensorflow_version 1.x
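(Optional) Before going further, it does not hurt to confirm that the 1.x runtime is actually active. This quick check is my own addition, not part of the original steps, and only relies on the standard tf.__version__ attribute.
# Optional sanity check (my own addition): confirm Colab switched to a TF 1.x runtime.
import tensorflow as tf
print(tf.__version__)  # expect something like 1.15.x
assert tf.__version__.startswith('1.'), 'TF 1.x runtime not active; restart the runtime and re-run the magic above.'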
- Mount Google Drive (to access files on Google Drive)
from google.colab import drive
drive.mount('/content/gdrive')
- Clone the TensorFlow Models Repo
!git clone https://github.com/tensorflow/models.git /content/gdrive/MyDrive/tf1_retrain
- Install Requirements
!pip install tf_slim
!pip install lvis
!pip install tensorflow.io
- Set Environment
import os

%cd /content/gdrive/MyDrive/tf1_retrain/research
!protoc object_detection/protos/*.proto --python_out=.

pwd = os.getcwd()
os.environ['PYTHONPATH'] += f':{pwd}:{pwd}/slim'
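(Optional) A quick way to confirm the protos compiled and the paths were picked up. This is a small sketch of my own, using only the standard library, and assumes we are still in the research directory.
# Optional sanity check (my own addition): protoc should have generated *_pb2.py
# files, and PYTHONPATH should now include the research/ and slim/ folders.
import glob
print(len(glob.glob('object_detection/protos/*_pb2.py')), 'compiled proto modules')
print(os.environ['PYTHONPATH'])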
- Change Directory
%cd deeplab/
- Import
import collections
import os
import io
import sys
import tarfile
import tempfile
import urllib

import tensorflow as tf

from IPython import display
from ipywidgets import interact
from ipywidgets import interactive
from matplotlib import gridspec
from matplotlib import pyplot as plt
import numpy as np
from PIL import Image

sys.path.append('utils')
import get_dataset_colormap
- Function to run inference
def run_inference_for_single_image(image, graph):
  """Runs inference on a single image.

  Args:
    image: A PIL.Image object, raw input image.

  Returns:
    resized_image: RGB image resized from original input image.
    seg_map: Segmentation map of `resized_image`.
  """
  INPUT_TENSOR_NAME = 'ImageTensor:0'
  OUTPUT_TENSOR_NAME = 'SemanticPredictions:0'
  INPUT_SIZE = 513

  with graph.as_default():
    with tf.Session() as sess:
      # Resize so the longer side matches the exported model's input size.
      width, height = image.size
      resize_ratio = 1.0 * INPUT_SIZE / max(width, height)
      target_size = (int(resize_ratio * width), int(resize_ratio * height))
      resized_image = image.convert('RGB').resize(target_size, Image.ANTIALIAS)
      # Run the frozen graph on a batch containing the single image.
      batch_seg_map = sess.run(
          OUTPUT_TENSOR_NAME,
          feed_dict={INPUT_TENSOR_NAME: [np.asarray(resized_image)]})
      seg_map = batch_seg_map[0]
  return resized_image, seg_map
- Function to visualize result
LABEL_NAMES = np.asarray([
    'background', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus',
    'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike',
    'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tv'
])

FULL_LABEL_MAP = np.arange(len(LABEL_NAMES)).reshape(len(LABEL_NAMES), 1)
FULL_COLOR_MAP = get_dataset_colormap.label_to_color_image(FULL_LABEL_MAP)


def vis_segmentation(image, seg_map):
  plt.figure(figsize=(15, 5))
  grid_spec = gridspec.GridSpec(1, 4, width_ratios=[6, 6, 6, 1])

  plt.subplot(grid_spec[0])
  plt.imshow(image)
  plt.axis('off')
  plt.title('input image')

  plt.subplot(grid_spec[1])
  seg_image = get_dataset_colormap.label_to_color_image(
      seg_map, get_dataset_colormap.get_pascal_name()).astype(np.uint8)
  plt.imshow(seg_image)
  plt.axis('off')
  plt.title('segmentation map')

  plt.subplot(grid_spec[2])
  plt.imshow(image)
  plt.imshow(seg_image, alpha=0.7)
  plt.axis('off')
  plt.title('segmentation overlay')

  unique_labels = np.unique(seg_map)
  ax = plt.subplot(grid_spec[3])
  plt.imshow(FULL_COLOR_MAP[unique_labels].astype(np.uint8),
             interpolation='nearest')
  ax.yaxis.tick_right()
  plt.yticks(range(len(unique_labels)), LABEL_NAMES[unique_labels])
  plt.xticks([], [])
  ax.tick_params(width=0)
  plt.show()
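To make the colormap helper less of a black box, here is a tiny toy example of my own (not from the original notebook) showing what get_dataset_colormap.label_to_color_image returns for a couple of Pascal VOC label IDs.
# Toy illustration (my own addition): in the label list above, 0 is 'background'
# and 15 is 'person'; the helper maps each label ID to a fixed RGB colour.
toy_labels = np.array([[0, 15],
                       [15, 0]])
print(get_dataset_colormap.label_to_color_image(toy_labels))
# background maps to black (0, 0, 0); 'person' gets its own fixed VOC colour.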
- Function to load graph
def load_graph(PATH_TO_PB_FILE):
  detection_graph = tf.Graph()
  with detection_graph.as_default():
    graph_def = None
    with tf.gfile.GFile(PATH_TO_PB_FILE, 'rb') as fid:
      graph_def = tf.GraphDef.FromString(fid.read())
    tf.import_graph_def(graph_def, name='')
  return detection_graph
- Set Paths
image_path = '/content/gdrive/MyDrive/deeplabv3_mnv2_pascal/highway.jpg'
PATH_TO_ORIGINAL_FROZEN_GRAPH = '/content/gdrive/MyDrive/deeplabv3_mnv2_pascal/frozen_inference_graph.pb'
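(Optional) Drive paths are easy to mistype, so here is a small check of my own that the files exist and that the frozen graph actually exposes the tensor names used in run_inference_for_single_image. This is exactly the "was the export successful" question this post is about, sketched with standard TF 1.x graph inspection.
# Optional sanity check (my own addition): the paths should point at real files,
# and the frozen graph should contain the expected input/output ops.
assert os.path.isfile(image_path), image_path
assert os.path.isfile(PATH_TO_ORIGINAL_FROZEN_GRAPH), PATH_TO_ORIGINAL_FROZEN_GRAPH

check_graph = load_graph(PATH_TO_ORIGINAL_FROZEN_GRAPH)
op_names = {op.name for op in check_graph.get_operations()}
print('ImageTensor found:', 'ImageTensor' in op_names)
print('SemanticPredictions found:', 'SemanticPredictions' in op_names)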
- Set matplotlib to inline mode so results are displayed in Google Colab
%matplotlib inline
- Run Inference
loaded_graph = load_graph(PATH_TO_ORIGINAL_FROZEN_GRAPH)
image = Image.open(image_path)
resized_image, segmap = run_inference_for_single_image(image, loaded_graph)
vis_segmentation(resized_image, segmap)
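Beyond the plots, a quick textual summary of which Pascal VOC classes show up in the segmentation map is handy when you are eyeballing whether the exported model behaves sensibly. This last snippet is my own addition and only uses the segmap and LABEL_NAMES defined above.
# Optional follow-up (my own addition): list the predicted classes and how much
# of the image each one covers.
unique_labels, counts = np.unique(segmap, return_counts=True)
for label, count in zip(unique_labels, counts):
    print(f'{LABEL_NAMES[label]:15s} {100.0 * count / segmap.size:5.1f}% of pixels')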