OpenCV Body Detection
import cv2

photos = [
    'C:\\Users\\jefer\\Pictures\\pessoas.jpg',
]

img = cv2.imread(photos[0], cv2.IMREAD_COLOR)

# Detect full bodies with a pre-trained Haar cascade
body_cascade = cv2.CascadeClassifier('haarcascade_fullbody.xml')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
bodies = body_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=3)

# Draw a rectangle around each detection
for (x, y, w, h) in bodies:
    cv2.rectangle(img, (x, y), (x + w, y + h), (12, 150, 100), 2)

cv2.imshow('image', img)
cv2.waitKey(0)  # Without this line the image window is just a flash; the same happens with any number other than 0.
cv2.destroyAllWindows()
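If the cascade XML file is not sitting next to the script, the pip opencv-python package ships the standard Haar cascades and exposes their folder as cv2.data.haarcascades. The sketch below is a hedged variant under that assumption; 'people.jpg' and 'people_detected.jpg' are placeholder file names, and the scaleFactor / minNeighbors / minSize values are just illustrative knobs for trading false positives against missed detections.

import cv2

# Minimal sketch: load the bundled full-body cascade from the opencv-python
# install directory (cv2.data.haarcascades); 'people.jpg' is a placeholder.
cascade_path = cv2.data.haarcascades + 'haarcascade_fullbody.xml'
body_cascade = cv2.CascadeClassifier(cascade_path)

img = cv2.imread('people.jpg', cv2.IMREAD_COLOR)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# scaleFactor: image pyramid step; minNeighbors: overlapping detections
# required to keep a box; minSize: ignore detections smaller than this.
bodies = body_cascade.detectMultiScale(
    gray, scaleFactor=1.05, minNeighbors=5, minSize=(60, 120))

for (x, y, w, h) in bodies:
    cv2.rectangle(img, (x, y), (x + w, y + h), (12, 150, 100), 2)

cv2.imwrite('people_detected.jpg', img)  # save the result instead of opening a window

The second script below runs the same image through YOLOv3-tiny with OpenCV's DNN module instead of a Haar cascade.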
import time

import numpy as np
import cv2

photos = [
    'C:\\Users\\jefer\\Pictures\\pessoas.jpg',
]

# Load the YOLOv3-tiny network with OpenCV's DNN module
model_name = 'yolov3-tiny'
net = cv2.dnn.readNetFromDarknet('{}.cfg.txt'.format(model_name), '{}.weights'.format(model_name))

# COCO class labels and one random color per class
labels = open('coco.names.txt').read().strip().split("\n")
colors = np.random.randint(0, 255, size=(len(labels), 3), dtype="uint8")

image = cv2.imread(photos[0])
(input_height, input_width) = image.shape[:2]

# Names of the network's output (unconnected) layers
layer_names = net.getLayerNames()
layer_names = [layer_names[i[0] - 1] for i in net.getUnconnectedOutLayers()]

# Build a 416x416 blob, scale pixels to [0, 1], and swap BGR -> RGB
blob = cv2.dnn.blobFromImage(image, 1 / 255.0, (416, 416), swapRB=True, crop=False)
net.setInput(blob)

start = time.time()
layerOutputs = net.forward(layer_names)
end = time.time()
print("detection took {:.6f} seconds".format(end - start))

boxes = []
confidences = []
classIDs = []
min_confidence = 0.5
threshold = 0.3

for output in layerOutputs:
    for detection in output:
        scores = detection[5:]
        classID = np.argmax(scores)
        confidence = scores[classID]
        if confidence < min_confidence:
            continue
        # Scale the bounding box coordinates back relative to the size of the
        # image, keeping in mind that YOLO returns the center (x, y)-coordinates
        # of the bounding box followed by its width and height
        box = detection[0:4] * np.array([input_width, input_height, input_width, input_height])
        (centerX, centerY, width, height) = box.astype("int")
        # Use the center (x, y)-coordinates to derive the top-left corner of the box
        x = int(centerX - (width / 2))
        y = int(centerY - (height / 2))
        # Update our lists of bounding box coordinates, confidences, and class IDs
        boxes.append([x, y, int(width), int(height)])
        confidences.append(float(confidence))
        classIDs.append(classID)

# Non-maximum suppression to drop overlapping boxes
idxs = cv2.dnn.NMSBoxes(boxes, confidences, min_confidence, threshold)

if len(idxs) > 0:
    for i in idxs.flatten():
        # Extract the bounding box coordinates
        (x, y) = (boxes[i][0], boxes[i][1])
        (w, h) = (boxes[i][2], boxes[i][3])
        # Draw a bounding box rectangle and label on the image
        color = [int(c) for c in colors[classIDs[i]]]
        cv2.rectangle(image, (x, y), (x + w, y + h), color, 2)
        text = "{}: {:.4f}".format(labels[classIDs[i]], confidences[i])
        cv2.putText(image, text, (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)

cv2.imshow("Image", image)
cv2.waitKey(0)
cv2.destroyAllWindows()
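One version note, offered as a hedged aside: in newer OpenCV releases (roughly 4.5.4 onward) net.getUnconnectedOutLayers() returns a flat 1-D array of indices rather than an Nx1 array, so the i[0] indexing above fails there. The helper below (a hypothetical name, not part of the gist) is a small version-agnostic sketch for collecting the output layer names.

import numpy as np

def output_layer_names(net):
    # Works whether getUnconnectedOutLayers() yields [[200], [227]] (older
    # OpenCV) or [200, 227] (newer OpenCV): flatten first, then subtract 1
    # because the returned indices are 1-based.
    layer_names = net.getLayerNames()
    out_indices = np.asarray(net.getUnconnectedOutLayers()).flatten()
    return [layer_names[i - 1] for i in out_indices]

With that helper, layer_names = output_layer_names(net) replaces the list comprehension above regardless of the installed OpenCV version.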
YOLO configs and weights: https://pjreddie.com/darknet/yolo/