demographic_face_analyze.py
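"""Measure the average skin brightness of the beard-free face region in portrait images.

For every image found under the given directories, a face-parsing mask is
predicted with face-parsing.PyTorch, the face-skin pixels above the bottom of
the nose are kept (so chin/beard pixels are excluded), and the mean grayscale
intensity of that region is saved to a per-category .npy file.

Assumption: the face-parsing.PyTorch label map is used, where label 1 is face
skin and label 10 is the nose.
"""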
import sys
from glob import glob

import cv2
import numpy as np
from tqdm import tqdm

from utils.util import sub_folders

# Make the local face-parsing.PyTorch checkout importable so its test.py can be used.
sys.path.insert(0, "../face-parsing.PyTorch-master/")
from test import evaluate
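
# NOTE (assumption): `evaluate` is treated as face-parsing.PyTorch's inference entry
# point: it takes a list of BGR images and returns one per-pixel label mask per image.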


def without_beard_degree_level_analysis(*argv):
    """For each image directory, save the per-image mean intensity of the beard-free face region."""
    for im_dir in argv:
        # The category name is the second-to-last path component, e.g. "res" for "./res/".
        cate = im_dir.split("/")[-2]
        data = []
        for path in sub_folders(im_dir):
            images_path = glob(path + "/*")
            for image in tqdm(images_path):
                im = cv2.imread(image)
                mask = evaluate([im, ])
                data.append(round(np.mean(without_beard_region(im, mask[0])), 2))
        np.save(f"./without_beard_data/{cate}_without_beard.npy", data)


def without_beard_region(image, mask, position=False):
    """Return the grayscale values of face-skin pixels above the bottom of the nose.

    If `position` is True, also return the (row, col) indices of those pixels.
    """
    assert len(image.shape) in (2, 3), \
        f"Expected a grayscale or color image, but got an array with {len(image.shape)} dimensions"
    if len(image.shape) == 3:
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    else:
        gray = image
    mask = mask.astype(np.uint8)
    # Resize the mask to the working resolution (the input images are assumed to be 224x224).
    mask = cv2.resize(mask, (224, 224), interpolation=cv2.INTER_NEAREST)
    face_pos = np.where(mask == 1)   # face-skin pixels (label 1)
    nose = np.where(mask == 10)      # nose pixels (label 10)
    b_lim = nose[0][-1]              # bottom row of the nose; everything below is treated as beard area
    without_beard = [[], []]
    # np.where returns indices in row-major order, so rows are sorted ascending;
    # collect face pixels until the first row at or below the nose's bottom edge.
    i = 0
    while i < len(face_pos[0]) and face_pos[0][i] < b_lim:
        without_beard[0].append(face_pos[0][i])
        without_beard[1].append(face_pos[1][i])
        i += 1
    # Debug visualisation:
    # gray[without_beard[0], without_beard[1]] = 255
    # cv2.imwrite(f"{gray[0, 0]}.png", gray)
    if position:
        return gray[without_beard[0], without_beard[1]], without_beard
    return gray[without_beard[0], without_beard[1]]


if __name__ == '__main__':
    without_beard_degree_level_analysis("./res/")