safe_search.py
# Connect to Google Cloud Vision API and utilize their safe_search to
# moderate illustration submissions
from google.cloud import vision
import io


def detect_safe_search(path):
    """
    Detects adult, violent or racy content in uploaded images
    Input: path to the image file
    Output: String, either stating 'No inappropriate material detected'
    or 'Image Flagged' with information about what is inappropriate
    """
    client = vision.ImageAnnotatorClient()

    # If local illustration
    with io.open(path, "rb") as image_file:
        content = image_file.read()
    image = vision.types.Image(content=content)

    # # If remote illustration
    # image = vision.types.Image()
    # image.source.image_uri = uri

    response = client.safe_search_detection(image=image)
    safe = response.safe_search_annotation

    # Names of likelihood from google.cloud.vision.enums
    likelihood_name = (
        "UNKNOWN",
        "VERY_UNLIKELY",
        "UNLIKELY",
        "POSSIBLE",
        "LIKELY",
        "VERY_LIKELY",
    )

    # Check illustration against each safe_search category
    # Flag if inappropriate material is 'Possible' or above
    if safe.adult > 2 or safe.violence > 2 or safe.racy > 2:
        # Set flag - provide information about what is inappropriate
        flagged = [
            "adult: {}".format(likelihood_name[safe.adult]),
            "violence: {}".format(likelihood_name[safe.violence]),
            "racy: {}".format(likelihood_name[safe.racy]),
        ]
        return f"Image Flagged: {flagged}"
    else:
        return "No inappropriate material detected"