---------------------------------------------------------------------------ModuleNotFoundError Traceback (most recent call last)
/workspaces/data_mining/data_mining/aiml-common/lectures/scene-understanding/object-detection/object-detection-intro/colab_webcam.ipynb Cell 2 line 3
<a href='vscode-notebook-cell://dev-container%2B7b22686f737450617468223a222f686f6d652f70616e74656c69732e6d6f6e6f67696f756469732f6c6f63616c2f7765622f73697465732f636f75727365732f646174615f6d696e696e67222c226c6f63616c446f636b6572223a66616c73652c2273657474696e6773223a7b22686f7374223a22756e69783a2f2f2f7661722f72756e2f646f636b65722e736f636b227d2c22636f6e66696746696c65223a7b22246d6964223a312c22667350617468223a222f686f6d652f70616e74656c69732e6d6f6e6f67696f756469732f6c6f63616c2f7765622f73697465732f636f75727365732f646174615f6d696e696e672f2e646576636f6e7461696e65722f646576636f6e7461696e65722e6a736f6e222c2265787465726e616c223a2266696c653a2f2f2f686f6d652f70616e74656c69732e6d6f6e6f67696f756469732f6c6f63616c2f7765622f73697465732f636f75727365732f646174615f6d696e696e672f2e646576636f6e7461696e65722f646576636f6e7461696e65722e6a736f6e222c2270617468223a222f686f6d652f70616e74656c69732e6d6f6e6f67696f756469732f6c6f63616c2f7765622f73697465732f636f75727365732f646174615f6d696e696e672f2e646576636f6e7461696e65722f646576636f6e7461696e65722e6a736f6e222c22736368656d65223a2266696c65227d7d/workspaces/data_mining/data_mining/aiml-common/lectures/scene-understanding/object-detection/object-detection-intro/colab_webcam.ipynb#W1sdnNjb2RlLXJlbW90ZQ%3D%3D?line=0'>1</a> # import dependencies
<a href='vscode-notebook-cell://dev-container%2B7b22686f737450617468223a222f686f6d652f70616e74656c69732e6d6f6e6f67696f756469732f6c6f63616c2f7765622f73697465732f636f75727365732f646174615f6d696e696e67222c226c6f63616c446f636b6572223a66616c73652c2273657474696e6773223a7b22686f7374223a22756e69783a2f2f2f7661722f72756e2f646f636b65722e736f636b227d2c22636f6e66696746696c65223a7b22246d6964223a312c22667350617468223a222f686f6d652f70616e74656c69732e6d6f6e6f67696f756469732f6c6f63616c2f7765622f73697465732f636f75727365732f646174615f6d696e696e672f2e646576636f6e7461696e65722f646576636f6e7461696e65722e6a736f6e222c2265787465726e616c223a2266696c653a2f2f2f686f6d652f70616e74656c69732e6d6f6e6f67696f756469732f6c6f63616c2f7765622f73697465732f636f75727365732f646174615f6d696e696e672f2e646576636f6e7461696e65722f646576636f6e7461696e65722e6a736f6e222c2270617468223a222f686f6d652f70616e74656c69732e6d6f6e6f67696f756469732f6c6f63616c2f7765622f73697465732f636f75727365732f646174615f6d696e696e672f2e646576636f6e7461696e65722f646576636f6e7461696e65722e6a736f6e222c22736368656d65223a2266696c65227d7d/workspaces/data_mining/data_mining/aiml-common/lectures/scene-understanding/object-detection/object-detection-intro/colab_webcam.ipynb#W1sdnNjb2RlLXJlbW90ZQ%3D%3D?line=1'>2</a> from IPython.display import display, Javascript, Image
----> <a href='vscode-notebook-cell://dev-container%2B7b22686f737450617468223a222f686f6d652f70616e74656c69732e6d6f6e6f67696f756469732f6c6f63616c2f7765622f73697465732f636f75727365732f646174615f6d696e696e67222c226c6f63616c446f636b6572223a66616c73652c2273657474696e6773223a7b22686f7374223a22756e69783a2f2f2f7661722f72756e2f646f636b65722e736f636b227d2c22636f6e66696746696c65223a7b22246d6964223a312c22667350617468223a222f686f6d652f70616e74656c69732e6d6f6e6f67696f756469732f6c6f63616c2f7765622f73697465732f636f75727365732f646174615f6d696e696e672f2e646576636f6e7461696e65722f646576636f6e7461696e65722e6a736f6e222c2265787465726e616c223a2266696c653a2f2f2f686f6d652f70616e74656c69732e6d6f6e6f67696f756469732f6c6f63616c2f7765622f73697465732f636f75727365732f646174615f6d696e696e672f2e646576636f6e7461696e65722f646576636f6e7461696e65722e6a736f6e222c2270617468223a222f686f6d652f70616e74656c69732e6d6f6e6f67696f756469732f6c6f63616c2f7765622f73697465732f636f75727365732f646174615f6d696e696e672f2e646576636f6e7461696e65722f646576636f6e7461696e65722e6a736f6e222c22736368656d65223a2266696c65227d7d/workspaces/data_mining/data_mining/aiml-common/lectures/scene-understanding/object-detection/object-detection-intro/colab_webcam.ipynb#W1sdnNjb2RlLXJlbW90ZQ%3D%3D?line=2'>3</a> from google.colab.output import eval_js
<a href='vscode-notebook-cell://dev-container%2B7b22686f737450617468223a222f686f6d652f70616e74656c69732e6d6f6e6f67696f756469732f6c6f63616c2f7765622f73697465732f636f75727365732f646174615f6d696e696e67222c226c6f63616c446f636b6572223a66616c73652c2273657474696e6773223a7b22686f7374223a22756e69783a2f2f2f7661722f72756e2f646f636b65722e736f636b227d2c22636f6e66696746696c65223a7b22246d6964223a312c22667350617468223a222f686f6d652f70616e74656c69732e6d6f6e6f67696f756469732f6c6f63616c2f7765622f73697465732f636f75727365732f646174615f6d696e696e672f2e646576636f6e7461696e65722f646576636f6e7461696e65722e6a736f6e222c2265787465726e616c223a2266696c653a2f2f2f686f6d652f70616e74656c69732e6d6f6e6f67696f756469732f6c6f63616c2f7765622f73697465732f636f75727365732f646174615f6d696e696e672f2e646576636f6e7461696e65722f646576636f6e7461696e65722e6a736f6e222c2270617468223a222f686f6d652f70616e74656c69732e6d6f6e6f67696f756469732f6c6f63616c2f7765622f73697465732f636f75727365732f646174615f6d696e696e672f2e646576636f6e7461696e65722f646576636f6e7461696e65722e6a736f6e222c22736368656d65223a2266696c65227d7d/workspaces/data_mining/data_mining/aiml-common/lectures/scene-understanding/object-detection/object-detection-intro/colab_webcam.ipynb#W1sdnNjb2RlLXJlbW90ZQ%3D%3D?line=3'>4</a> from base64 import b64decode, b64encode
<a href='vscode-notebook-cell://dev-container%2B7b22686f737450617468223a222f686f6d652f70616e74656c69732e6d6f6e6f67696f756469732f6c6f63616c2f7765622f73697465732f636f75727365732f646174615f6d696e696e67222c226c6f63616c446f636b6572223a66616c73652c2273657474696e6773223a7b22686f7374223a22756e69783a2f2f2f7661722f72756e2f646f636b65722e736f636b227d2c22636f6e66696746696c65223a7b22246d6964223a312c22667350617468223a222f686f6d652f70616e74656c69732e6d6f6e6f67696f756469732f6c6f63616c2f7765622f73697465732f636f75727365732f646174615f6d696e696e672f2e646576636f6e7461696e65722f646576636f6e7461696e65722e6a736f6e222c2265787465726e616c223a2266696c653a2f2f2f686f6d652f70616e74656c69732e6d6f6e6f67696f756469732f6c6f63616c2f7765622f73697465732f636f75727365732f646174615f6d696e696e672f2e646576636f6e7461696e65722f646576636f6e7461696e65722e6a736f6e222c2270617468223a222f686f6d652f70616e74656c69732e6d6f6e6f67696f756469732f6c6f63616c2f7765622f73697465732f636f75727365732f646174615f6d696e696e672f2e646576636f6e7461696e65722f646576636f6e7461696e65722e6a736f6e222c22736368656d65223a2266696c65227d7d/workspaces/data_mining/data_mining/aiml-common/lectures/scene-understanding/object-detection/object-detection-intro/colab_webcam.ipynb#W1sdnNjb2RlLXJlbW90ZQ%3D%3D?line=4'>5</a> import cv2
ModuleNotFoundError: No module named 'google.colab'
Helper Functions
Below are a few helper functions that make it easy to convert between different image data types and formats.
# --- Helpers for converting between browser (base64 data-URL) images and
# --- OpenCV / numpy image representations.

def js_to_image(js_reply):
    """Convert a JavaScript webcam capture into an OpenCV BGR image.

    Params:
        js_reply: JavaScript object (a base64 data-URL string) containing
            the image captured from the webcam.
    Returns:
        img: OpenCV BGR image.
    """
    # Drop the "data:image/jpeg;base64," prefix and decode the payload.
    image_bytes = b64decode(js_reply.split(',')[1])
    # Raw bytes -> numpy array of uint8 suitable for cv2.imdecode.
    jpg_as_np = np.frombuffer(image_bytes, dtype=np.uint8)
    # Decode the JPEG buffer into a BGR image (flags=1 == IMREAD_COLOR).
    img = cv2.imdecode(jpg_as_np, flags=1)
    return img


# function to convert an OpenCV rectangle bounding-box image into a base64
# byte string to be overlayed on the video stream
def bbox_to_bytes(bbox_array):
    """Encode an RGBA overlay array as a base64 PNG data-URL string.

    Params:
        bbox_array: Numpy array (pixels) containing the rectangle to overlay
            on the video stream.
    Returns:
        bytes: Base64 image byte string ("data:image/png;base64,...").
    """
    # Convert the array into a PIL image, preserving the alpha channel.
    bbox_PIL = PIL.Image.fromarray(bbox_array, 'RGBA')
    iobuf = io.BytesIO()
    # PNG keeps transparency, so only the drawn rectangles are visible.
    bbox_PIL.save(iobuf, format='png')
    # Build the data-URL return string.
    bbox_bytes = 'data:image/png;base64,{}'.format(
        (str(b64encode(iobuf.getvalue()), 'utf-8')))
    return bbox_bytes
Haar Cascade Classifier
For this tutorial we will run a simple object detection algorithm called Haar Cascade on our images and video fetched from our webcam. OpenCV has a pre-trained Haar Cascade face detection model.
# Initialize the Haar Cascade face detection model.
# OpenCV ships the pre-trained cascade XML files under cv2.data.haarcascades;
# cv2.samples.findFile resolves the full path to the frontal-face model.
cascade_path = cv2.samples.findFile(
    cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
face_cascade = cv2.CascadeClassifier(cascade_path)
Webcam Images
Running code on images taken from the webcam is fairly straightforward. We will utilize code within Google Colab’s Code Snippets, which has a variety of useful code functions to perform various tasks.
We will be using the code snippet for Camera Capture to utilize your computer’s webcam.
def take_photo(filename='photo.jpg', quality=0.8):
    """Capture one webcam photo in the browser, run face detection, save it.

    Displays a JS capture widget, waits for the user to click "Capture",
    converts the returned JPEG data-URL into an OpenCV image, draws Haar
    Cascade face bounding boxes on it, and writes the result to disk.

    Params:
        filename: Path the annotated photo is written to.
        quality: JPEG quality passed to the browser encoder (0.0-1.0).
    Returns:
        filename: The path the image was saved to.
    """
    js = Javascript('''
      async function takePhoto(quality) {
        const div = document.createElement('div');
        const capture = document.createElement('button');
        capture.textContent = 'Capture';
        div.appendChild(capture);

        const video = document.createElement('video');
        video.style.display = 'block';
        const stream = await navigator.mediaDevices.getUserMedia({video: true});

        document.body.appendChild(div);
        div.appendChild(video);
        video.srcObject = stream;
        await video.play();

        // Resize the output to fit the video element.
        google.colab.output.setIframeHeight(document.documentElement.scrollHeight, true);

        // Wait for Capture to be clicked.
        await new Promise((resolve) => capture.onclick = resolve);

        const canvas = document.createElement('canvas');
        canvas.width = video.videoWidth;
        canvas.height = video.videoHeight;
        canvas.getContext('2d').drawImage(video, 0, 0);
        stream.getVideoTracks()[0].stop();
        div.remove();
        return canvas.toDataURL('image/jpeg', quality);
      }
      ''')
    display(js)

    # Get photo data from the browser (base64 JPEG data-URL).
    data = eval_js('takePhoto({})'.format(quality))
    # Get OpenCV format image.
    img = js_to_image(data)
    # js_to_image returns a BGR image, so convert with COLOR_BGR2GRAY.
    # (Bug fix: the original used COLOR_RGB2GRAY, which applies the
    # luminance channel weights in the wrong order for BGR input.)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    print(gray.shape)
    # Get face bounding box coordinates using the Haar Cascade model.
    faces = face_cascade.detectMultiScale(gray)
    # Draw each face bounding box on the image.
    for (x, y, w, h) in faces:
        img = cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
    # Save the annotated image.
    cv2.imwrite(filename, img)
    return filename
# Drive a single capture: take the photo, report where it was saved, and
# show it inline. Failures (no webcam, permission denied) are printed.
try:
    filename = take_photo('photo.jpg')
    print('Saved to {}'.format(filename))
    # Show the image which was just taken.
    display(Image(filename))
except Exception as err:
    # Errors will be thrown if the user does not have a webcam or if they
    # do not grant the page permission to access it.
    print(str(err))
Webcam Videos
Running code on webcam video is a little more complex than images. We need to start a video stream using our webcam as input. Then we run each frame through our program (face detection) and create an overlay image that contains the bounding box of detection(s). We then overlay the bounding box image back onto the next frame of our video stream.
image.png
# JavaScript to properly create our live video stream using our webcam as input
def video_stream():
    """Inject the JavaScript that runs the live webcam stream in this cell.

    The injected code builds the video element, a status label, and an
    overlay <img>, and defines `stream_frame(label, imgData)` which Python
    polls via eval_js. Clicking the video (or the instruction text) sets
    `shutdown`, which makes the next `stream_frame` call tear down the DOM
    and return an empty string.
    """
    js = Javascript('''
    var video;
    var div = null;
    var stream;
    var captureCanvas;
    var imgElement;
    var labelElement;

    var pendingResolve = null;
    var shutdown = false;

    function removeDom() {
       stream.getVideoTracks()[0].stop();
       video.remove();
       div.remove();
       video = null;
       div = null;
       stream = null;
       imgElement = null;
       captureCanvas = null;
       labelElement = null;
    }

    function onAnimationFrame() {
      if (!shutdown) {
        window.requestAnimationFrame(onAnimationFrame);
      }
      if (pendingResolve) {
        var result = "";
        if (!shutdown) {
          captureCanvas.getContext('2d').drawImage(video, 0, 0, 640, 480);
          result = captureCanvas.toDataURL('image/jpeg', 0.8)
        }
        var lp = pendingResolve;
        pendingResolve = null;
        lp(result);
      }
    }

    async function createDom() {
      if (div !== null) {
        return stream;
      }

      div = document.createElement('div');
      div.style.border = '2px solid black';
      div.style.padding = '3px';
      div.style.width = '100%';
      div.style.maxWidth = '600px';
      document.body.appendChild(div);

      const modelOut = document.createElement('div');
      modelOut.innerHTML = "<span>Status:</span>";
      labelElement = document.createElement('span');
      labelElement.innerText = 'No data';
      labelElement.style.fontWeight = 'bold';
      modelOut.appendChild(labelElement);
      div.appendChild(modelOut);

      video = document.createElement('video');
      video.style.display = 'block';
      video.width = div.clientWidth - 6;
      video.setAttribute('playsinline', '');
      video.onclick = () => { shutdown = true; };
      stream = await navigator.mediaDevices.getUserMedia(
          {video: { facingMode: "environment"}});
      div.appendChild(video);

      imgElement = document.createElement('img');
      imgElement.style.position = 'absolute';
      imgElement.style.zIndex = 1;
      imgElement.onclick = () => { shutdown = true; };
      div.appendChild(imgElement);

      const instruction = document.createElement('div');
      instruction.innerHTML =
          '<span style="color: red; font-weight: bold;">' +
          'When finished, click here or on the video to stop this demo</span>';
      div.appendChild(instruction);
      instruction.onclick = () => { shutdown = true; };

      video.srcObject = stream;
      await video.play();

      captureCanvas = document.createElement('canvas');
      captureCanvas.width = 640; //video.videoWidth;
      captureCanvas.height = 480; //video.videoHeight;
      window.requestAnimationFrame(onAnimationFrame);

      return stream;
    }

    async function stream_frame(label, imgData) {
      if (shutdown) {
        removeDom();
        shutdown = false;
        return '';
      }

      var preCreate = Date.now();
      stream = await createDom();

      var preShow = Date.now();
      if (label != "") {
        labelElement.innerHTML = label;
      }

      if (imgData != "") {
        var videoRect = video.getClientRects()[0];
        imgElement.style.top = videoRect.top + "px";
        imgElement.style.left = videoRect.left + "px";
        imgElement.style.width = videoRect.width + "px";
        imgElement.style.height = videoRect.height + "px";
        imgElement.src = imgData;
      }

      var preCapture = Date.now();
      var result = await new Promise(function(resolve, reject) {
        pendingResolve = resolve;
      });
      shutdown = false;

      return {'create': preShow - preCreate,
              'show': preCapture - preShow,
              'capture': Date.now() - preCapture,
              'img': result};
    }
    ''')

    display(js)


def video_frame(label, bbox):
    """Request one frame from the browser stream.

    Params:
        label: HTML string shown in the status label.
        bbox: Base64 PNG data-URL overlay to display ('' for none).
    Returns:
        The JS reply: a dict with timing info and the captured frame under
        key 'img', or an empty value once the user has stopped the stream.
    """
    data = eval_js('stream_frame("{}", "{}")'.format(label, bbox))
    return data
# Start streaming video from the webcam.
video_stream()
# Label shown above the video element.
label_html = 'Capturing...'
# Initialize the bounding-box overlay to empty (no overlay on first frame).
bbox = ''
count = 0
while True:
    js_reply = video_frame(label_html, bbox)
    if not js_reply:
        # Empty reply means the user clicked to stop the stream.
        break

    # Convert the JS response to an OpenCV image.
    img = js_to_image(js_reply["img"])

    # Transparent RGBA overlay (480x640x4) for the bounding boxes.
    bbox_array = np.zeros([480, 640, 4], dtype=np.uint8)

    # js_to_image returns a BGR image, so convert with COLOR_BGR2GRAY.
    # (Bug fix: the original used COLOR_RGB2GRAY, which applies the
    # luminance channel weights in the wrong order for BGR input.)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Get face region coordinates.
    faces = face_cascade.detectMultiScale(gray)
    # Draw each face bounding box on the overlay.
    for (x, y, w, h) in faces:
        bbox_array = cv2.rectangle(bbox_array, (x, y), (x + w, y + h),
                                   (255, 0, 0), 2)

    # Make drawn pixels fully opaque and everything else fully transparent.
    bbox_array[:, :, 3] = (bbox_array.max(axis=2) > 0).astype(int) * 255
    # Convert the overlay into a base64 PNG for the browser.
    bbox_bytes = bbox_to_bytes(bbox_array)
    # Update bbox so the next frame gets the new overlay.
    bbox = bbox_bytes
Hope You Enjoyed!
If you enjoyed the tutorial and want to see more videos or tutorials check out my YouTube channel HERE