-
Notifications
You must be signed in to change notification settings - Fork 2
/
emotion_detect.py
176 lines (147 loc) · 8.51 KB
/
emotion_detect.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
import cognitive_face as CF
import os

# NOTE(security): a subscription key should never be hard-coded in source.
# Prefer the environment variable; the literal below is kept only as a
# backward-compatible fallback and should be rotated and removed.
KEY = os.environ.get('AZURE_FACE_KEY', 'b056669e9d264c848486577dfb6ac4cb')
ENDPOINT = os.environ.get(
    'AZURE_FACE_ENDPOINT',
    'https://marvingpt-emotion-detect.cognitiveservices.azure.com/',
)


def main():
    """Detect faces in a sample image and print age, gender and smile.

    Calls the Azure Face API via the ``cognitive_face`` SDK (network I/O);
    prints one Age/Gender/Smile triple per detected face.
    """
    # Configure the SDK with the key and regional endpoint.
    CF.Key.set(KEY)
    CF.BaseUrl.set(ENDPOINT)

    # Publicly hosted sample image (URL kept byte-identical to the original).
    image_url = 'https://media.istockphoto.com/id/1399611762/photo/offended-little-hispanic-girl-looking-sad-and-upset-while-posing-against-a-blue-studio.jpg?b=1&s=170667a&w=0&k=20&c=r4A6C1FfjAwnWl0ZVL9t6EIkDhGo8o4KSdYNwzvKJsY='

    # Detect faces, requesting the age, gender and smile attributes.
    faces = CF.face.detect(image_url, attributes='age,gender,smile')

    if not faces:
        # Originally the loop just printed nothing; say so explicitly.
        print('No faces detected.')
        return

    for face in faces:
        attrs = face['faceAttributes']  # hoist the repeated dict lookup
        print('Age:', attrs['age'])
        print('Gender:', attrs['gender'])
        print('Smile:', attrs['smile'])


if __name__ == '__main__':
    main()
# import asyncio
# import io
# import os
# import sys
# import time
# import uuid
# import requests
# from urllib.parse import urlparse
# from io import BytesIO
# # To install this module, run:
# # python -m pip install Pillow
# from PIL import Image, ImageDraw
# from azure.cognitiveservices.vision.face import FaceClient
# from msrest.authentication import CognitiveServicesCredentials
# from azure.cognitiveservices.vision.face.models import TrainingStatusType, Person, QualityForRecognition
# # This key will serve all examples in this document.
# KEY = "b056669e9d264c848486577dfb6ac4cb"
# # This endpoint will be used in all examples in this quickstart.
# ENDPOINT = "https://marvingpt-emotion-detect.cognitiveservices.azure.com/"
# # Base url for the Verify and Facelist/Large Facelist operations
# IMAGE_BASE_URL = 'https://raw.githubusercontent.com/Azure-Samples/cognitive-services-sample-data-files/master/Face/images/'
# # Used in the Person Group Operations and Delete Person Group examples.
# # You can call list_person_groups to print a list of preexisting PersonGroups.
# # SOURCE_PERSON_GROUP_ID should be all lowercase and alphanumeric. For example, 'mygroupname' (dashes are OK).
# PERSON_GROUP_ID = str(uuid.uuid4()) # assign a random ID (or name it anything)
# # Used for the Delete Person Group example.
# TARGET_PERSON_GROUP_ID = str(uuid.uuid4()) # assign a random ID (or name it anything)
# # Create an authenticated FaceClient.
# face_client = FaceClient(ENDPOINT, CognitiveServicesCredentials(KEY))
# print(face_client)
# '''
# Create the PersonGroup
# '''
# # Create empty Person Group. Person Group ID must be lower case, alphanumeric, and/or with '-', '_'.
# print('Person group:', PERSON_GROUP_ID)
# face_client.person_group.create(person_group_id=PERSON_GROUP_ID, name=PERSON_GROUP_ID, recognition_model='recognition_04')
# # Define woman friend
# woman = face_client.person_group_person.create(PERSON_GROUP_ID, name="Woman")
# # Define man friend
# man = face_client.person_group_person.create(PERSON_GROUP_ID, name="Man")
# # Define child friend
# child = face_client.person_group_person.create(PERSON_GROUP_ID, name="Child")
# '''
# Detect faces and register them to each person
# '''
# # Find all jpeg images of friends in working directory (TBD pull from web instead)
# woman_images = ["https://raw.githubusercontent.com/Azure-Samples/cognitive-services-sample-data-files/master/Face/images/Family1-Mom1.jpg", "https://raw.githubusercontent.com/Azure-Samples/cognitive-services-sample-data-files/master/Face/images/Family1-Mom2.jpg"]
# man_images = ["https://raw.githubusercontent.com/Azure-Samples/cognitive-services-sample-data-files/master/Face/images/Family1-Dad1.jpg", "https://raw.githubusercontent.com/Azure-Samples/cognitive-services-sample-data-files/master/Face/images/Family1-Dad2.jpg"]
# child_images = ["https://raw.githubusercontent.com/Azure-Samples/cognitive-services-sample-data-files/master/Face/images/Family1-Son1.jpg", "https://raw.githubusercontent.com/Azure-Samples/cognitive-services-sample-data-files/master/Face/images/Family1-Son2.jpg"]
# # Add to woman person
# for image in woman_images:
# # Check if the image is of sufficient quality for recognition.
# sufficientQuality = True
# detected_faces = face_client.face.detect_with_url(url=image, detection_model='detection_03', recognition_model='recognition_04', return_face_attributes=['qualityForRecognition'])
# for face in detected_faces:
# if face.face_attributes.quality_for_recognition != QualityForRecognition.high:
# sufficientQuality = False
# break
# face_client.person_group_person.add_face_from_url(PERSON_GROUP_ID, woman.person_id, image)
# print("face {} added to person {}".format(face.face_id, woman.person_id))
# if not sufficientQuality: continue
# # Add to man person
# for image in man_images:
# # Check if the image is of sufficient quality for recognition.
# sufficientQuality = True
# detected_faces = face_client.face.detect_with_url(url=image, detection_model='detection_03', recognition_model='recognition_04', return_face_attributes=['qualityForRecognition'])
# for face in detected_faces:
# if face.face_attributes.quality_for_recognition != QualityForRecognition.high:
# sufficientQuality = False
# break
# face_client.person_group_person.add_face_from_url(PERSON_GROUP_ID, man.person_id, image)
# print("face {} added to person {}".format(face.face_id, man.person_id))
# if not sufficientQuality: continue
# # Add to child person
# for image in child_images:
# # Check if the image is of sufficient quality for recognition.
# sufficientQuality = True
# detected_faces = face_client.face.detect_with_url(url=image, detection_model='detection_03', recognition_model='recognition_04', return_face_attributes=['qualityForRecognition'])
# for face in detected_faces:
# if face.face_attributes.quality_for_recognition != QualityForRecognition.high:
# sufficientQuality = False
# print("{} has insufficient quality".format(face))
# break
# face_client.person_group_person.add_face_from_url(PERSON_GROUP_ID, child.person_id, image)
# print("face {} added to person {}".format(face.face_id, child.person_id))
# if not sufficientQuality: continue
# '''
# Train PersonGroup
# '''
# # Train the person group
# print("pg resource is {}".format(PERSON_GROUP_ID))
# rawresponse = face_client.person_group.train(PERSON_GROUP_ID, raw= True)
# print(rawresponse)
# while (True):
# training_status = face_client.person_group.get_training_status(PERSON_GROUP_ID)
# print("Training status: {}.".format(training_status.status))
# print()
# if (training_status.status is TrainingStatusType.succeeded):
# break
# elif (training_status.status is TrainingStatusType.failed):
# face_client.person_group.delete(person_group_id=PERSON_GROUP_ID)
# sys.exit('Training the person group has failed.')
# time.sleep(5)
# '''
# Identify a face against a defined PersonGroup
# '''
# # Group image for testing against
# test_image = "https://raw.githubusercontent.com/Azure-Samples/cognitive-services-sample-data-files/master/Face/images/identification1.jpg"
# print('Pausing for 10 seconds to avoid triggering rate limit on free account...')
# time.sleep (10)
# # Detect faces
# face_ids = []
# # We use detection model 3 to get better performance, recognition model 4 to support quality for recognition attribute.
# faces = face_client.face.detect_with_url(test_image, detection_model='detection_03', recognition_model='recognition_04', return_face_attributes=['qualityForRecognition'])
# for face in faces:
# # Only take the face if it is of sufficient quality.
# if face.face_attributes.quality_for_recognition == QualityForRecognition.high or face.face_attributes.quality_for_recognition == QualityForRecognition.medium:
# face_ids.append(face.face_id)
# # Identify faces
# results = face_client.face.identify(face_ids, PERSON_GROUP_ID)
# print('Identifying faces in image')
# if not results:
# print('No person identified in the person group')
# for identifiedFace in results:
# if len(identifiedFace.candidates) > 0:
# print('Person is identified for face ID {} in image, with a confidence of {}.'.format(identifiedFace.face_id, identifiedFace.candidates[0].confidence)) # Get topmost confidence score
# # Verify faces
# verify_result = face_client.face.verify_face_to_person(identifiedFace.face_id, identifiedFace.candidates[0].person_id, PERSON_GROUP_ID)
# print('verification result: {}. confidence: {}'.format(verify_result.is_identical, verify_result.confidence))
# else:
# print('No person identified for face ID {} in image.'.format(identifiedFace.face_id))
# print()
# print('End of quickstart.')