Skip to content

Commit

Permalink
small fix
Browse files Browse the repository at this point in the history
  • Loading branch information
arodik committed Mar 13, 2024
1 parent 05819ac commit cbb2989
Show file tree
Hide file tree
Showing 4 changed files with 11 additions and 78 deletions.
30 changes: 9 additions & 21 deletions embedding-calculator/src/_endpoints.py
Original file line number Diff line number Diff line change
Expand Up @@ -48,21 +48,14 @@ def face_detection_skip_check(face_plugins):
else:
return face_plugins

class JSONEncoderWithNumpy(json.JSONEncoder):
    """JSON encoder that knows how to serialize NumPy values.

    Used as ``json.dumps(obj, cls=JSONEncoderWithNumpy)`` so that detector
    output containing NumPy scalars (e.g. ``np.float32`` probabilities) can
    be serialized without a ``TypeError``.
    """

    def default(self, obj):
        # Accept every NumPy float width (float16/32/64), not only float32.
        if isinstance(obj, np.floating):
            return float(obj)
        # NumPy integer scalars (int32, int64, ...) -> plain Python int.
        if isinstance(obj, np.integer):
            return int(obj)
        # Arrays become (nested) lists so they round-trip through JSON.
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        # Anything else: defer to the base class, which raises TypeError.
        return json.JSONEncoder.default(self, obj)

def endpoints(app):
#@app.before_first_request
#def init_model() -> None:
# detector = managers.plugin_manager.detector
# face_plugins = managers.plugin_manager.face_plugins
# face_plugins = face_detection_skip_check(face_plugins)
# detector(
# img=read_img(str(IMG_DIR / 'einstein.jpeg')),
# det_prob_threshold=_get_det_prob_threshold(),
# face_plugins=face_plugins
# )
# print("Starting to load ML models")
# return None

@app.route('/healthcheck')
def healthcheck():
return jsonify(
Expand Down Expand Up @@ -109,14 +102,9 @@ def find_faces_base64_post():
def find_faces_post():
if ENV.PYTORCH_MODE:
img = request.files['file']
#if ENV.DETECTOR_NAME == 'retinaface':
# boxes, faces = retina_detector(img)
# aaa=2
#else:
aaa = inference_detector(image_path = img)
aaa = json.dumps(aaa)
aaa = json.loads(aaa)
return jsonify(aaa)
raw_data = inference_detector(image_path = img)
serialized_data = json.dumps(raw_data, cls=JSONEncoderWithNumpy)
return json.loads(serialized_data)
else:
detector = managers.plugin_manager.detector
face_plugins = managers.plugin_manager.filter_face_plugins(
Expand Down
2 changes: 1 addition & 1 deletion embedding-calculator/src/constants.py
Original file line number Diff line number Diff line change
Expand Up @@ -46,7 +46,7 @@ class ENV(Constants):
DEVICE = get_env('DEVICE', 'cpu')
RECOGNITION_MODEL = get_env('RECOGNITION_MODEL', 'ir_50')
RECOGNITION_MODEL_PATH = get_env('RECOGNITION_MODEL_PATH', 'services/facescan/plugins/adaface/pretrained/adaface_ir50_ms1mv2.ckpt')
DETECTOR_NAME = get_env('DETECTOR_NAME', 'retinaface') # mtcnn or retinaface
DETECTOR_NAME = get_env('DETECTOR_NAME', 'mtcnn') # mtcnn or retinaface


LOGGING_LEVEL = logging._nameToLevel[ENV.LOGGING_LEVEL_NAME]
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -35,11 +35,9 @@ def get_aligned_face(image_path, rgb_pil_image=None):
elif "retinaface" in detector_name:
content_type = image_path.content_type.split('/')[-1]
bboxes, faces = retina_detector(img, content_type)
#face = faces[0]
except Exception as e:
print('Face detection Failed due to error.')
print(e)
#face = None
faces = None
bboxes = None

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -368,18 +368,6 @@ def retina_detector(image_pil, content_type):

resize = 1

# testing begin
##try:
## filestr = image_path.read()
##except Exception as e:
## print(e)
##npimg = np.frombuffer(filestr, np.uint8)

###try:
### npimg = np.frombuffer(image_path, np.uint8)
###except Exception as E:
### print(E)

img_byte_arr = io.BytesIO()
try:
image_pil.save(img_byte_arr, format=content_type)
Expand All @@ -395,10 +383,6 @@ def retina_detector(image_pil, content_type):
except Exception as e:
print(e)

##try:
## img_raw = cv2.imread(image_path, cv2.IMREAD_COLOR)
##except Exception as e:
## print(e)
img = np.float32(img_raw)

im_height, im_width, _ = img.shape
Expand Down Expand Up @@ -452,52 +436,15 @@ def retina_detector(image_pil, content_type):

dets = np.concatenate((dets, landms), axis=1)

#####################################################################################################

#detected = [[{"plugins_versions": {"calculator": "",
# "detector": args.network},
# "result": []}]]
# show image
#if args.save_image:
# for b in dets:
# if b[4] < args.vis_thres:
# continue
# text = "{:.4f}".format(b[4])
# b = list(map(int, b))
# face = {"box": {"probability": text,
# "x_max": b[0],
# "x_min": b[1],
# "y_max": b[2],
# "y_min": b[3],},
# "embedding": ""}

# detected[0][0]["result"].append(face)
# return detected

crop_size = (112, 112)
reference = get_reference_facial_points(default_square=crop_size[0] == crop_size[1])

#####################################################################################################
# USE DETS????? !!!!! Probably their contains boxes and landmarks at the same time ???
#####################################################################################################

faces = []
#for i, box in enumerate(boxes):
# landmark = landms[i]
# facial5points = [[landmark[j], landmark[j + 5]] for j in range(5)]
# warped_face = warp_and_crop_face(np.array(img_raw), facial5points, reference, crop_size)
# faces.append(Image.fromarray(warped_face))

for i, box in enumerate(dets):
landmark = box[5:]
facial5points = [[landmark[j], landmark[j + 5]] for j in range(5)]
warped_face = warp_and_crop_face(np.array(img_raw), facial5points, reference, crop_size)
faces.append(Image.fromarray(warped_face))

return dets, faces

#retina_detector("images/einstein-011.png")




return dets, faces

0 comments on commit cbb2989

Please sign in to comment.