run_video_face_detect_onnx.py
"""
This code uses the onnx model to detect faces from live video or cameras.
"""
import time
import cv2
import numpy as np
import onnx
import vision.utils.box_utils_numpy as box_utils
from caffe2.python.onnx import backend
# onnx runtime
import onnxruntime as ort
def predict(width, height, confidences, boxes, prob_threshold, iou_threshold=0.3, top_k=-1):
    # Drop the batch dimension: the model returns (1, num_priors, ...) arrays.
    boxes = boxes[0]
    confidences = confidences[0]
    picked_box_probs = []
    picked_labels = []
    # Skip index 0 (the background class) and filter each class by score.
    for class_index in range(1, confidences.shape[1]):
        probs = confidences[:, class_index]
        mask = probs > prob_threshold
        probs = probs[mask]
        if probs.shape[0] == 0:
            continue
        subset_boxes = boxes[mask, :]
        box_probs = np.concatenate([subset_boxes, probs.reshape(-1, 1)], axis=1)
        # Non-maximum suppression over the surviving boxes for this class.
        box_probs = box_utils.hard_nms(box_probs,
                                       iou_threshold=iou_threshold,
                                       top_k=top_k,
                                       )
        picked_box_probs.append(box_probs)
        picked_labels.extend([class_index] * box_probs.shape[0])
    if not picked_box_probs:
        return np.array([]), np.array([]), np.array([])
    picked_box_probs = np.concatenate(picked_box_probs)
    # Scale normalized box coordinates back to pixel coordinates.
    picked_box_probs[:, 0] *= width
    picked_box_probs[:, 1] *= height
    picked_box_probs[:, 2] *= width
    picked_box_probs[:, 3] *= height
    return picked_box_probs[:, :4].astype(np.int32), np.array(picked_labels), picked_box_probs[:, 4]
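# Note on shapes (an assumption, based on the version-RFB-320 export): for the
# 1x3x240x320 input built below, the session returns confidences of shape
# (1, num_priors, 2) and boxes of shape (1, num_priors, 4), with box
# coordinates normalized to [0, 1]; predict() rescales them to pixels.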
label_path = "models/voc-model-labels.txt"
onnx_path = "models/onnx/version-RFB-320.onnx"
class_names = [name.strip() for name in open(label_path).readlines()]

# Load and sanity-check the exported model.
predictor = onnx.load(onnx_path)
onnx.checker.check_model(predictor)
onnx.helper.printable_graph(predictor.graph)
predictor = backend.prepare(predictor, device="CPU")  # caffe2 backend, kept for the commented-out path below

# onnxruntime session used for the actual inference.
ort_session = ort.InferenceSession(onnx_path)
input_name = ort_session.get_inputs()[0].name
cap = cv2.VideoCapture("/home/linzai/Videos/video/16_6.MP4")  # video file; use cv2.VideoCapture(0) for a camera
threshold = 0.7
total_boxes = 0  # running count of detections (renamed from `sum` to avoid shadowing the built-in)
while True:
    ret, orig_image = cap.read()
    if orig_image is None:
        print("no img")
        break
    # Preprocess: BGR->RGB, resize to the model's 320x240 input, normalize to
    # roughly [-1, 1], and reorder HWC -> NCHW with a batch dimension.
    image = cv2.cvtColor(orig_image, cv2.COLOR_BGR2RGB)
    image = cv2.resize(image, (320, 240))
    # image = cv2.resize(image, (640, 480))
    image_mean = np.array([127, 127, 127])
    image = (image - image_mean) / 128
    image = np.transpose(image, [2, 0, 1])
    image = np.expand_dims(image, axis=0)
    image = image.astype(np.float32)
    # confidences, boxes = predictor.run(image)  # caffe2 backend alternative
    time_time = time.time()
    confidences, boxes = ort_session.run(None, {input_name: image})
    print("cost time:{}".format(time.time() - time_time))
    # Post-process at the original frame size so the boxes land on orig_image.
    boxes, labels, probs = predict(orig_image.shape[1], orig_image.shape[0], confidences, boxes, threshold)
    for i in range(boxes.shape[0]):
        box = boxes[i, :]
        label = f"{class_names[labels[i]]}: {probs[i]:.2f}"
        cv2.rectangle(orig_image, (box[0], box[1]), (box[2], box[3]), (255, 255, 0), 4)
        # cv2.putText(orig_image, label,
        #             (box[0] + 20, box[1] + 40),
        #             cv2.FONT_HERSHEY_SIMPLEX,
        #             1,  # font scale
        #             (255, 0, 255),
        #             2)  # line type
    total_boxes += boxes.shape[0]
    orig_image = cv2.resize(orig_image, (0, 0), fx=0.7, fy=0.7)
    cv2.imshow('annotated', orig_image)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
print("sum:{}".format(total_boxes))