From e21179964c6d0077737620cc208a5ba32be7e261 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Mon, 14 Aug 2023 14:43:52 -0500 Subject: [PATCH 001/182] index on main: 10550ad Merge pull request #23 from kabilar/main From dcf3c32cd07a919096da5bfec64d214e4b2a9d9b Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Mon, 14 Aug 2023 19:41:14 -0500 Subject: [PATCH 002/182] add facemap model table and update facemap processing to trigger facemap pose inference --- element_facemap/facial_behavior_estimation.py | 77 ++++++++++++++++++- 1 file changed, 76 insertions(+), 1 deletion(-) diff --git a/element_facemap/facial_behavior_estimation.py b/element_facemap/facial_behavior_estimation.py index 0feb73e..b40dbcb 100644 --- a/element_facemap/facial_behavior_estimation.py +++ b/element_facemap/facial_behavior_estimation.py @@ -124,7 +124,7 @@ class VideoRecording(dj.Manual): Attributes: Session (foreign key) : Primary key for Session table. - recording_id (int) : Recording ID. + recording_id (int) : Recording identification number. Device (foreign key) : Primary key for Device table. """ @@ -153,6 +153,69 @@ class File(dj.Part): """ +@schema +class BodyPart(dj.Lookup): + """Cumulative list of all body parts tracked by all facemap models + + Attributes: + body_part ( varchar(32) ): Body part short name. + body_part_description ( varchar(1000),optional ): Full description + + """ + + definition = """ + body_part : varchar(32) + --- + body_part_description='' : varchar(1000) + """ + + @classmethod + def extract_new_body_parts(cls, ): + + +@schema +class FacemapModel(dj.Manual): + """Trained Models stored in an experiment session for facial pose inference + + Attributes: + Session (foregin key) : Primary key for Session table. + model_id(int) : Count of models inserted + model_name( varchar(64) ): Name of model, filepath.stem + """ + + definition = """ + -> Session + model_id : int + model_name : varchar(64) + """ + class BodyPart(dj.Part): + """Body parts associated with a given model + + Attributes: + body_part ( varchar(32) ): Body part name, (location specfication) + body_part_description ( varchar(1000) ): Optional. Longer description.""" + + definition = """ + -> master + -> BodyPart + """ + + class File(dj.Part): + """Relative paths of facemap models with respect to facemap_root_data_dir + + Attributes: + FacemapModel (foreign key): Facemap model primary key. + file_path ( varchar(255) ): filepath of facemap model, relative to root data dir + """ + + definition = """ + -> master + file_id: int + --- + file_path: varchar(255) # model filepath, relative to root data dir + """ + + @schema class RecordingInfo(dj.Imported): """Information extracted from video file. @@ -266,6 +329,18 @@ def infer_output_dir(self, key, relative=True, mkdir=True): return output_dir.relative_to(processed_dir) if relative else output_dir +@schema +class FacemapTraining(dj.Computed): + """_summary_ + + Args: + dj (_type_): _description_ + + Returns: + _type_: _description_ + """ + + @schema class FacemapProcessing(dj.Computed): """Automated table to run Facemap with inputs from FacemapTask. 
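Given the BodyPart and FacemapModel definitions added in this patch, registering a trained network could look roughly like the sketch below (the session key, body-part name, model name, and file path are illustrative placeholders, not taken from the patch, and the schema is assumed to be activated as elsewhere in the element):

    # Hypothetical entries; field names follow the table definitions in this patch.
    session_key = (Session & 'subject="subject1"').fetch1("KEY")   # an existing upstream Session entry
    model_key = {**session_key, "model_id": 0, "model_name": "facemap_model_state"}

    BodyPart.insert1(
        {"body_part": "eye(back)", "body_part_description": "posterior corner of the eye"},
        skip_duplicates=True,
    )
    FacemapModel.insert1(model_key)
    FacemapModel.BodyPart.insert1({**model_key, "body_part": "eye(back)"})
    FacemapModel.File.insert1(
        {**model_key, "file_id": 0, "file_path": "models/facemap_model_state.pt"}  # path relative to the root data dir
    )
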
From 9d8e0f72bab521f1f8b2cad2da0a0f9d6ace5efc Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Fri, 18 Aug 2023 12:10:53 -0500 Subject: [PATCH 003/182] start to add implementation of facemap processing --- element_facemap/facial_behavior_estimation.py | 111 +++++++++++------- 1 file changed, 71 insertions(+), 40 deletions(-) diff --git a/element_facemap/facial_behavior_estimation.py b/element_facemap/facial_behavior_estimation.py index b40dbcb..914a508 100644 --- a/element_facemap/facial_behavior_estimation.py +++ b/element_facemap/facial_behavior_estimation.py @@ -153,40 +153,38 @@ class File(dj.Part): """ -@schema -class BodyPart(dj.Lookup): - """Cumulative list of all body parts tracked by all facemap models +# @schema +# class BodyPart(dj.Lookup): +# """Cumulative list of all body parts tracked by all facemap models - Attributes: - body_part ( varchar(32) ): Body part short name. - body_part_description ( varchar(1000),optional ): Full description +# Attributes: +# body_part ( varchar(32) ): Body part short name. +# body_part_description ( varchar(1000),optional ): Full description - """ +# """ - definition = """ - body_part : varchar(32) - --- - body_part_description='' : varchar(1000) - """ +# definition = """ +# body_part : varchar(32) +# --- +# body_part_description='' : varchar(1000) +# """ - @classmethod - def extract_new_body_parts(cls, ): +# @classmethod +# def extract_new_body_parts(cls, ): @schema class FacemapModel(dj.Manual): - """Trained Models stored in an experiment session for facial pose inference + """Trained Models stored for facial pose inference Attributes: - Session (foregin key) : Primary key for Session table. model_id(int) : Count of models inserted model_name( varchar(64) ): Name of model, filepath.stem """ definition = """ - -> Session - model_id : int - model_name : varchar(64) + model_id : int # model index, if multiple models + model_name : varchar(64) # name of model """ class BodyPart(dj.Part): """Body parts associated with a given model @@ -197,7 +195,8 @@ class BodyPart(dj.Part): definition = """ -> master - -> BodyPart + body_part: varchar(32) + body_part_description: varchar(255) """ class File(dj.Part): @@ -283,6 +282,10 @@ def make(self, key): ) +@schema +class FacemapParams(dj.Manual): + + @schema class FacemapTask(dj.Manual): """Staging table for pairing of recording and Facemap parameters before processing. @@ -310,6 +313,7 @@ class FacemapTask(dj.Manual): do_mot_svd=1 : bool do_mov_svd=0 : bool task_description='' : varchar(128) + facemap_model_name= """ def infer_output_dir(self, key, relative=True, mkdir=True): @@ -328,19 +332,6 @@ def infer_output_dir(self, key, relative=True, mkdir=True): return output_dir.relative_to(processed_dir) if relative else output_dir - -@schema -class FacemapTraining(dj.Computed): - """_summary_ - - Args: - dj (_type_): _description_ - - Returns: - _type_: _description_ - """ - - @schema class FacemapProcessing(dj.Computed): """Automated table to run Facemap with inputs from FacemapTask. 
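The hunk that follows replaces the do_SVD flag with a trigger_mode switch selecting between the original SVD processing and pose inference with a stored model. Under that assumption, the parameter set paired with a FacemapTask might minimally carry the fields referenced in this patch (the values shown are illustrative only):

    # Sketch of a parameter set consistent with the keys used below; not a definitive paramset.
    params = {
        "trigger_mode": "POSE",               # "SVD" -> motion/movie SVD; "POSE" -> keypoint inference
        "model_name": "facemap_model_state",  # FacemapModel entry to load when trigger_mode == "POSE"
        "bbox": [100, 356, 80, 336],          # [x1, x2, y1, y2] crop window
        "selected_frame_ind": None,           # None -> process all frames
        "sbin": 4,                            # spatial binning used by the SVD branch
        "motSVD": True,
        "movSVD": False,
    }
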
@@ -389,7 +380,7 @@ def make(self, key): ] ] # Processing performed using SVD (original facemap) - if params["do_SVD"] == True: + if params["trigger_mode"] == "SVD": output_dir = find_full_path(get_facemap_root_data_dir(), output_dir) facemap_run( video_files, @@ -401,16 +392,19 @@ def make(self, key): ) # Processing performed using externally trained deep learning models - else: - from facemap.pose import facemap_pose - + elif params["trigger_mode"] == "POSE": + model_file = (FacemapModel) + from facemap.pose import facemap_pose, facemap_network + import torch pose = facemap_pose.Pose( filenames=video_files, bbox=params["bbox"], gui=None, GUIobject=None, - model_name=str(params["model_name"]), + net, ) + facemap_model = (FacemapModel.File & f'model_name like "{params["model_name"]}"').fetch('file') + # Can make upstream train dataset table to fetch custom pretrained models to be used # Or need to insert names of the trained models into the facemap paramset @@ -422,8 +416,45 @@ def make(self, key): # Runs pose prediciton setup and predict landmarks for each video file # Save data to hdf5 file format - pose.run() - # + # pose.run() + + + # Set model name to model path, so that torch can load the model + pose.model_name = facemap_model + print("Loading model state from:", self.model_name) + pose.net.load_state_dict(torch.load(self.model_name)) + pose.net.to(pose.device) + + # Load model + model_params = torch.load(facemap_model, map_location=self.device) + channels = model_params["params"]["channels"] + kernel_size = 3 + nout = len(self.bodyparts) # number of outputs from the model + self.net = facemap_network.FMnet( + img_ch=1, + output_ch=nout, + labels_id=self.bodyparts, + channels=channels, + kernel=kernel_size, + device=self.device, + ) + + # Pose prediction setup + if not self.bbox_set: + for i in range(len(self.Ly)): + x1, x2, y1, y2 = 0, self.Ly[i], 0, self.Lx[i] + self.bbox.append([x1, x2, y1, y2]) + + # Update resize and add padding flags + if x2 - x1 != y2 - y1: # if not a square frame view then add padding + self.add_padding = True + if x2 - x1 != 256 or y2 - y1 != 256: # if not 256x256 then resize + self.resize = True + self.bbox_set = True + + # Run model inference + + _, creation_time = get_loader_result(key, FacemapTask) key = {**key, "processing_time": creation_time} From adf579c44973220a677d0193907779fdd0240d01 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Fri, 18 Aug 2023 16:13:44 -0500 Subject: [PATCH 004/182] add model inference step --- element_facemap/facial_behavior_estimation.py | 56 +++++++++++++++++-- 1 file changed, 50 insertions(+), 6 deletions(-) diff --git a/element_facemap/facial_behavior_estimation.py b/element_facemap/facial_behavior_estimation.py index 914a508..99046b0 100644 --- a/element_facemap/facial_behavior_estimation.py +++ b/element_facemap/facial_behavior_estimation.py @@ -404,19 +404,17 @@ def make(self, key): net, ) facemap_model = (FacemapModel.File & f'model_name like "{params["model_name"]}"').fetch('file') - + facemap_model_path = (FacemapModel.File & f'model_name like "{params["model_name"]}"').fetch('file_path') # Can make upstream train dataset table to fetch custom pretrained models to be used # Or need to insert names of the trained models into the facemap paramset # Need to configure downstream tables for interpretation of the outputed hdf5 file + # Run pose prediction setup # Run facial pose inference - # pose.pose_prediction_setup() # Loads model, updates resize/padding # Runs pose prediciton setup and predict landmarks for 
each video file # Save data to hdf5 file format - - # pose.run() # Set model name to model path, so that torch can load the model @@ -452,8 +450,54 @@ def make(self, key): self.resize = True self.bbox_set = True - # Run model inference - + # Run model inference, i.e. predict landmarks (xlabels, ylabels, likelihood) + for video_id in range(len(self.filenames[0])): + print("\nProcessing video: {}".format(self.filenames[0][video_id])) + pred_data, metadata = self.predict_landmarks(video_id) + + # Save model as hdf5 file + # Create a multi-index dict to store data in HDF5 file. First index is the scorer name, second index is the bodypart names, and third index is the coordinates (x, y, likelihood) + scorer = "Facemap" + bodyparts = self.bodyparts + data_dict = {} + data_dict[scorer] = {} + if selected_frame_ind is None: + indices = np.arange(self.cumframes[-1]) + else: + indices = selected_frame_ind + for index, bodypart in enumerate(bodyparts): + data_dict[scorer][bodypart] = {} + data_dict[scorer][bodypart]["x"] = data[:, index, 0][indices] + data_dict[scorer][bodypart]["y"] = data[:, index, 1][indices] + data_dict[scorer][bodypart]["likelihood"] = data[:, index, 2][indices] + + if self.gui is not None: + basename = self.gui.save_path + _, filename = os.path.split(self.filenames[0][video_id]) + videoname, _ = os.path.splitext(filename) + else: + basename, filename = os.path.split(self.filenames[0][video_id]) + videoname, _ = os.path.splitext(filename) + hdf5_filepath = os.path.join(basename, videoname + "_FacemapPose.h5") + with h5py.File(hdf5_filepath, "w") as f: + self.save_dict_to_hdf5(f, facemap_model_path.parent, data_dict) + return hdf5_filepath + + hdf5_filepath = os.path.join(basename, videoname + "_FacemapPose.h5") + with h5py.File(hdf5_filepath, "w") as f: + self.save_dict_to_hdf5(f, "", data_dict) + return hdf5_filepath + + + + # Save the data using h5py + savepath = self.save_data_to_hdf5(pred_data.cpu().numpy(), video_id) + print("Saved keypoints:", savepath) + # Save metadata to a pickle file + metadata_file = os.path.splitext(savepath)[0] + "_metadata.pkl" + with open(metadata_file, "wb") as f: + pickle.dump(metadata, f, pickle.HIGHEST_PROTOCOL) + print("Saved metadata:", metadata_file) _, creation_time = get_loader_result(key, FacemapTask) From c1f0a2c2ae1d71722616e52d240d4851c3492969 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Mon, 21 Aug 2023 15:52:19 -0500 Subject: [PATCH 005/182] update model dict save to h5py format --- element_facemap/facial_behavior_estimation.py | 94 +++++++------------ 1 file changed, 36 insertions(+), 58 deletions(-) diff --git a/element_facemap/facial_behavior_estimation.py b/element_facemap/facial_behavior_estimation.py index 99046b0..fb44f1c 100644 --- a/element_facemap/facial_behavior_estimation.py +++ b/element_facemap/facial_behavior_estimation.py @@ -10,6 +10,12 @@ import numpy as np from element_interface.utils import find_full_path, find_root_directory +import torch +import os +import h5py +import pickle + + schema = dj.schema() _linking_module = None @@ -153,25 +159,25 @@ class File(dj.Part): """ -# @schema -# class BodyPart(dj.Lookup): -# """Cumulative list of all body parts tracked by all facemap models - -# Attributes: -# body_part ( varchar(32) ): Body part short name. -# body_part_description ( varchar(1000),optional ): Full description +@schema +class BodyPart(dj.Lookup): + """Cumulative list of all body parts tracked by all facemap models (is this necessary?) 
-# """ + Attributes: + body_part ( varchar(32) ): Body part short name. + body_part_description ( varchar(1000),optional ): Full description -# definition = """ -# body_part : varchar(32) -# --- -# body_part_description='' : varchar(1000) -# """ + """ -# @classmethod -# def extract_new_body_parts(cls, ): + definition = """ + body_part : varchar(32) + --- + body_part_description='' : varchar(1000) + """ + @classmethod + def extract_new_body_parts(cls, ): + # TODO @schema class FacemapModel(dj.Manual): @@ -282,9 +288,6 @@ def make(self, key): ) -@schema -class FacemapParams(dj.Manual): - @schema class FacemapTask(dj.Manual): @@ -313,7 +316,7 @@ class FacemapTask(dj.Manual): do_mot_svd=1 : bool do_mov_svd=0 : bool task_description='' : varchar(128) - facemap_model_name= + facemap_model_name='' : varchar(32) """ def infer_output_dir(self, key, relative=True, mkdir=True): @@ -392,30 +395,20 @@ def make(self, key): ) # Processing performed using externally trained deep learning models - elif params["trigger_mode"] == "POSE": - model_file = (FacemapModel) + elif params["trigger_mode"] == "POSE": from facemap.pose import facemap_pose, facemap_network - import torch + + facemap_model = (FacemapModel.File & f'model_name like "{params["model_name"]}"').fetch('file') + facemap_model_path = (FacemapModel.File & f'model_name like "{params["model_name"]}"').fetch('file_path') + + # Instantiate Pose object, with filenames specified as video files, and bounding specified in params + # Assumes GUI to be none pose = facemap_pose.Pose( filenames=video_files, bbox=params["bbox"], gui=None, GUIobject=None, - net, ) - facemap_model = (FacemapModel.File & f'model_name like "{params["model_name"]}"').fetch('file') - facemap_model_path = (FacemapModel.File & f'model_name like "{params["model_name"]}"').fetch('file_path') - # Can make upstream train dataset table to fetch custom pretrained models to be used - # Or need to insert names of the trained models into the facemap paramset - - # Need to configure downstream tables for interpretation of the outputed hdf5 file - - # Run pose prediction setup - # Run facial pose inference - - # Runs pose prediciton setup and predict landmarks for each video file - # Save data to hdf5 file format - # Set model name to model path, so that torch can load the model pose.model_name = facemap_model @@ -455,50 +448,35 @@ def make(self, key): print("\nProcessing video: {}".format(self.filenames[0][video_id])) pred_data, metadata = self.predict_landmarks(video_id) + data = pred_data.cpu().numpy() # Save model as hdf5 file # Create a multi-index dict to store data in HDF5 file. 
First index is the scorer name, second index is the bodypart names, and third index is the coordinates (x, y, likelihood) scorer = "Facemap" bodyparts = self.bodyparts data_dict = {} data_dict[scorer] = {} - if selected_frame_ind is None: + if params['selected_frame_ind'] is None: indices = np.arange(self.cumframes[-1]) else: - indices = selected_frame_ind + indices = params['selected_frame_ind'] for index, bodypart in enumerate(bodyparts): data_dict[scorer][bodypart] = {} data_dict[scorer][bodypart]["x"] = data[:, index, 0][indices] data_dict[scorer][bodypart]["y"] = data[:, index, 1][indices] data_dict[scorer][bodypart]["likelihood"] = data[:, index, 2][indices] - if self.gui is not None: - basename = self.gui.save_path - _, filename = os.path.split(self.filenames[0][video_id]) - videoname, _ = os.path.splitext(filename) - else: - basename, filename = os.path.split(self.filenames[0][video_id]) - videoname, _ = os.path.splitext(filename) - hdf5_filepath = os.path.join(basename, videoname + "_FacemapPose.h5") - with h5py.File(hdf5_filepath, "w") as f: - self.save_dict_to_hdf5(f, facemap_model_path.parent, data_dict) - return hdf5_filepath + basename, filename = os.path.split(self.filenames[0][video_id]) + videoname, _ = os.path.splitext(filename) hdf5_filepath = os.path.join(basename, videoname + "_FacemapPose.h5") with h5py.File(hdf5_filepath, "w") as f: - self.save_dict_to_hdf5(f, "", data_dict) - return hdf5_filepath - - + self.save_dict_to_hdf5(f, facemap_model_path.parent, data_dict) - # Save the data using h5py - savepath = self.save_data_to_hdf5(pred_data.cpu().numpy(), video_id) - print("Saved keypoints:", savepath) # Save metadata to a pickle file - metadata_file = os.path.splitext(savepath)[0] + "_metadata.pkl" + metadata_file = os.path.splitext(output_dir)[0] + "_metadata.pkl" with open(metadata_file, "wb") as f: pickle.dump(metadata, f, pickle.HIGHEST_PROTOCOL) print("Saved metadata:", metadata_file) - _, creation_time = get_loader_result(key, FacemapTask) key = {**key, "processing_time": creation_time} From 2506bcb174ad61014d80cc6f48f150f075c64fbc Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Fri, 8 Sep 2023 15:31:40 -0500 Subject: [PATCH 006/182] add new facial_pose_model file allowing to run model inference --- element_facemap/facial_pose_model.py | 446 +++++++++++++++++++++++++++ 1 file changed, 446 insertions(+) create mode 100644 element_facemap/facial_pose_model.py diff --git a/element_facemap/facial_pose_model.py b/element_facemap/facial_pose_model.py new file mode 100644 index 0000000..10166d0 --- /dev/null +++ b/element_facemap/facial_pose_model.py @@ -0,0 +1,446 @@ +import importlib +import inspect +from datetime import datetime +from glob import glob +from pathlib import Path +from typing import List, Tuple + +import cv2 +import datajoint as dj +import numpy as np +from element_interface.utils import find_full_path, find_root_directory + +import torch +import os +import h5py +import pickle +from . import facial_behavior_estimation as fbe + +schema = dj.schema() + +_linking_module = None + + +def activate( + facemap_model_schema_name, + fbe_schema_name=None, + *, + create_schema=True, + create_tables=True, + linking_module=None, +): + """Activate schema. 
+ + Args: + facemap_model_schema_name (str): Schema name on the database server to activate the + `facemap_pose_model` schema of element-facemap + fbe_schema_name (str): Schema name on the database server to activate the 'facial_behavioral_estimation + create_schema (bool): When True (default), create schema in the database if it + does not yet exist. + create_tables (bool): When True (default), create tables in the database if + they do not yet exist. + linking_module (str): A module name or a module containing the required + dependencies to activate the `facial_behavior_estimation` module: + + Dependencies: + Upstream tables: + + Session: A parent table to VideoRecording, identifying a recording session + + Equipment: A parent table to VideoRecording, identifying video recording equipment + + VideoRecording: A parent table to FacemapInferenceTask, identifying videos to be used in inference + Functions: + + get_facemap_root_data_dir() -> list + Retrieves the root data directory(s) with face recordings for all + subject/sessions. Returns a string for the full path to the root data directory. + + get_facemap_processed_data_dir(session_key: dict) -> str + Optional function to retrieve the desired output directory + for Facemap files for a given session. If unspecified, + the output is stored in the video folder for the session, which is the default behavior of Facemap. + Returns a string of the absolute path of the output directory. + """ + if isinstance(linking_module, str): + linking_module = importlib.import_module(linking_module) + assert inspect.ismodule( + linking_module + ), "The argument 'dependency' must be a module's name or a module" + assert hasattr( + linking_module, "get_facemap_root_data_dir" + ), "The linking module must specify a lookup function for a root data directory" + + global _linking_module + _linking_module = linking_module + + # activate facial behavioral extimation (fbe) schema + fbe.activate( + fbe_schema_name, + create_schema=create_schema, + create_tables=create_tables, + linking_module=linking_module, + ) + + # activate facial pose model schema + schema.activate( + facemap_model_schema_name, + create_schema=create_schema, + create_tables=create_tables, + add_objects=_linking_module.__dict__, + ) + + +# ----------------------------- Table declarations ---------------------- +@schema +class BodyPart(dj.Lookup): + """Body parts tracked by DeepLabCut models + + Attributes: + body_part ( varchar(32) ): Body part short name. + body_part_description ( varchar(1000),optional ): Full description + + """ + + definition = """ + body_part : varchar(32) + --- + body_part_description='' : varchar(1000) + """ + + +@schema +class FacemapModel(dj.Manual): + """Trained Models stored for facial pose inference + + Attributes: + model_id(int) : File identification number, located in filename + model_name( varchar(64) ): Name of model, filepath.stem + """ + + definition = """ + model_id : int # user assigned ID associated with a unique model + --- + model_name : varchar(64) # name of model + model_description: varchar(1000) # optional model description + """ + + class BodyPart(dj.Part): + """Body parts associated with a given model + + Attributes: + body_part ( varchar(32) ): Body part name, (location specfication) + body_part_description ( varchar(1000) ): Optional. 
Longer description.""" + + definition = """ + -> master + -> BodyPart + """ + + class File(dj.Part): + """Relative paths of facemap models with respect to facemap_root_data_dir + + Attributes: + FacemapModel (foreign key): Facemap model primary key. + model_file ( attach ): filepath of facemap model, relative to root data dir + """ + + definition = """ + -> master + --- + model_file: attach # model file attachment + """ + + +@schema +class FacemapPoseEstimationTask(dj.Manual): + """Staging table for pairing of recording and Facemap parameters before processing. + + Attributes: + fbe.VideoRecording (foreign key) : Primary key for VideoRecording table. + FacemapModel (foreign key) : Primary key for the facemap model table + facemap_task_id (smallint) : Facemap task ID + facemap_output_dir ( varchar(255), optional) : output dir storing the results + of Facemap analysis. + task_mode (enum) : Default load. Load or trigger analysis. + bbox (longblob) : Bounding box for cropping the video [x1, x2, y1, y2]. If not set, entire frame is used. + bbox_set (bool) : True if bbox is set, False if not set. + task_description ( varchar(128), optional) : Task description. + """ + + definition = """ + # Staging table for pairing of recording and Facemap parameters before processing. + -> fbe.VideoRecording + -> FacemapModel + --- + facemap_output_dir='' : varchar(255) # output directory - storing the results of Facemap analysis + task_mode='trigger' : enum('load', 'trigger') + bbox=null : longblob # list containing bounding box for cropping the video [x1, x2, y1, y2] + task_description='' : varchar(128) + """ + + def infer_output_dir(self, key, relative=True, mkdir=True): + video_file = (fbe.VideoRecording.File & key).fetch("file_path", limit=1)[0] + video_dir = find_full_path(fbe.get_facemap_root_data_dir(), video_file).parent + root_dir = find_root_directory(fbe.get_facemap_root_data_dir(), video_dir) + + paramset_key = (FacemapPoseEstimationTask & key).fetch1("facemap_task_id") + processed_dir = Path(fbe.get_facemap_processed_data_dir()) + output_dir = ( + processed_dir / video_dir.relative_to(root_dir) / f"facemap_{paramset_key}" + ) + + if mkdir: + output_dir.mkdir(parents=True, exist_ok=True) + + return output_dir.relative_to(processed_dir) if relative else output_dir + + +@schema +class FacemapProcessing(dj.Computed): + """Automated table to run Facemap with inputs from FacemapTask. + + Attributes: + FacemapTask (foreign key) : Primary key for FacemapTask table. + processing_time (datetime) : Time of generation of the facemap results. + package_version ( varchar(16), optional) : Facemap package version. + """ + + definition = """ + # Processing Procedure + -> FacemapPoseEstimationTask + --- + processing_time : datetime # time of generation of the facemap results + package_version='' : varchar(16) + """ + + # Process only the VideoRecordings that have their Info inserted. 
+ @property + def key_source(self): + """Limits the population of FacemapProcessing to those that have VideoRecording.File defined.""" + return FacemapPoseEstimationTask & fbe.VideoRecording.File + + def make(self, key): + """Runs Facemap""" + + task_mode = (FacemapPoseEstimationTask & key).fetch1("task_mode") + + output_dir = (FacemapPoseEstimationTask & key).fetch1("facemap_output_dir") + if not output_dir: + output_dir = FacemapPoseEstimationTask().infer_output_dir( + key, relative=True, mkdir=True + ) + # update processing_output_dir + FacemapPoseEstimationTask.update1( + {**key, "facemap_output_dir": output_dir.as_posix()} + ) + + if task_mode == "trigger": + from facemap import utils + from facemap.pose import refine_pose, model_loader, pose as facemap_pose + + params = (FacemapPoseEstimationTask & key).fetch1("facemap_params") + + video_files = ( + FacemapPoseEstimationTask * fbe.VideoRecording.File & key + ).fetch("file_path") + video_files = [ + [ + find_full_path(get_facemap_root_data_dir(), video_file).as_posix() + for video_file in video_files + ] + ] + # Processing performed using externally trained deep learning models + + # MAKE THIS A FULL PATH USING FIND_FULL_PATH + # facemap_model_path = (FacemapModel.File & f'model_name like "{params["model_name"]}"').fetch('file_path') + + # Model Name of interest should be specified by user during facemap task params manual update + model_name = (FacemapPoseEstimationTask & key).fetch("model_name") + # Fetches file attachment + facemap_model_path = ( + FacemapModel.File & f'model_name="{model_name}"' + ).fetch("file") + # move this "facemap_model_path" to the facemap model root directory + models_root_dir = model_loader.get_models_dir() + import shutil + + shutil.copy( + find_full_path(get_facemap_root_data_dir(), facemap_model_path), + models_root_dir, + ) + + # Instantiate Pose object, with filenames specified as video files, and bounding specified in params + # Assumes GUI to be none as we are running CLI implementation + pose = facemap_pose.Pose( + filenames=video_files, + bbox=(FacemapPoseEstimationTask & key).fetch1( + "bbox" + ), # should be manually inserted into params via jupyter notebook cell + bbox_set=(FacemapPoseEstimationTask & key).fetch1("bbox_set"), + gui=None, + GUIobject=None, + model_name=facemap_model_path.stem, + output_dir=output_dir, + ) + pose.run() + + keypoints_filepath = find_full_path(get_facemap_root_data_dir(), output_dir) + keypoints_data = utils.load_keypoints() + + _, creation_time = get_loader_result(key, FacemapTask) + key = {**key, "processing_time": creation_time} + + self.insert1(key) + + +@schema +class FacemapPoseEstimation(dj.Computed): + """Results of facemap pose estimation + + Attributes: + FacemapPoseEstimationTask (foreign key): Pose Estimation Task key. + post_estimation_time (datetime): time of generation of this set of facemap results. + execution_duration (datetime): duration of model + """ + + definition = """ + -> FacemapPoseEstimationTask + --- + pose_estimation_time: datetime # time of generation of this set of facemap results + pose_estimation_duration: float # seconds + total_frame_count: int # frame count across all video files + """ + + class BodyPartPosition(dj.Part): + """Position of individual body parts by frame index + + Attributes: + PoseEstimation (foreign key): Pose Estimation key. + FacemapModel.BodyPart (foreign key): Body Part key. + frame_index (longblob): Frame index in model. + x_pos (longblob): X position. + y_pos (longblob): Y position. 
+ likelihood (longblob): Model confidence.""" + + definition = """ # uses facemap h5 output for body part position + -> master + -> FacemapModel.BodyPart + --- + frame_index : longblob # frame index in model + x_pos : longblob + y_pos : longblob + likelihood : longblob + """ + + def make(self, key): + """.populate() method will launch training for each PoseEstimationTask""" + # ID model and directories + task_mode, output_dir = (FacemapPoseEstimationTask & key).fetch1( + "task_mode", "pose_estimation_output_dir" + ) + + output_dir = find_full_path(fbe.get_facemap_root_data_dir(), output_dir) + + # Triger PoseEstimation + if task_mode == "trigger": + # Triggering facemap for pose estimation required: + # - model_path: full path to the directory containing the trained model + # - video_filepaths: full paths to the video files for inference + # - analyze_video_params: optional parameters to analyze video + + from facemap.process import run as facemap_run + from facemap import utils + + bbox = (FacemapPoseEstimationTask & key).fetch1("bbox") + + video_files = ( + FacemapPoseEstimationTask * fbe.VideoRecording.File & key + ).fetch("file_path") + video_files = [ + [ + find_full_path( + fbe.get_facemap_root_data_dir(), video_file + ).as_posix() + for video_file in video_files + ] + ] + # Model Name of interest should be specified by user during facemap task params manual update + model_name = (FacemapPoseEstimationTask & key).fetch("model_name") + # Fetches file attachment + facemap_model_name = ( + FacemapModel.File & f'model_name="{model_name}"' + ).fetch("file") + facemap_model_path = Path.cwd() / facemap_model_name + + # move this "facemap_model_path" to the facemap model root directory + models_root_dir = model_loader.get_models_dir() + import shutil + + facemap_model_path.copy(facemap_model_path, models_root_dir) + model_output_path = Path(models_root_dir) / facemap_model_name + # copy using pathlib + model_output_path.write_text(facemap_model_path.read_text()) + + # Processing performed using externally trained deep learning models + from facemap.pose import pose as facemap_pose, model_loader + + # Instantiate Pose object, with filenames specified as video files, and bounding specified in params + # Assumes GUI to be none as we are running CLI implementation + pose = facemap_pose.Pose( + filenames=video_files, + bbox=bbox, # should be manually inserted into params via jupyter notebook cell + bbox_set=bool(bbox), + model_name=facemap_model_path.stem, + output_dir=output_dir, + ) + pose.run() + + # look into facemap naming function + + facemap_result_path = next(output_dir.glob("*.h5")) + + # only 1 .h5 model output + full_metadata_path = next(output_dir.glob("*.pkl")) + + # only 1 metadata.pkl inference output + with open(full_metadata_path, "rb") as f: + metadata = pickle.load(f) + + facemap_result = utils.load_keypoints( + metadata["bodyparts"], facemap_result_path + ) + # facemap_result is a 3D nested array with D1 - (x,y likelihood) D2 - bodyparts D3 - frame count + # body parts are ordered the same way as stored + + keypoints_data = utils.load_keypoints( + refine_pose.BODYPARTS, keypoints_filepath + ) + + pose_x_coord = keypoints_data[0, :, :] # (bodyparts, frames) + pose_y_coord = keypoints_data[1, :, :] # (bodyparts, frames) + pose_likelihood = keypoints_data[2, :, :] # (bodyparts, frames) + + for b_idx, bodypart in enumerate(refine_pose.BODYPARTS): + entry = { + "body_part": bodypart, + "x_pos": pose_x_coord[b_idx], + "y_pos": pose_y_coord[b_idx], + "likelihood": 
pose_likelihood[b_idx], + } + + # body_parts = [ + # { + # **key, + # "body_part": metadata["bodyparts"][y_], + # "frame_index": facemap_result[x_][y_][:], + # "x_pos": facemap_result[0][y_][likelihood_], + # "y_pos": facemap_result[1][y_][likelihood_], + # "likelihood": facemap_result[2][y_][likelihood_], + # } + # for x_ in range(facemap_result.shape[0]) + # for in range(facemap_result.shape[1]) + # for likelihood_ in range(facemap_result.shape[2]) + # ] + + creation_time = datetime.fromtimestamp(dlc_result.creation_time).strftime( + "%Y-%m-%d %H:%M:%S" + ) + + self.insert1({**key, "pose_estimation_time": creation_time}) + self.BodyPartPosition.insert(body_parts) From ef27a563c48228807751a624351d11f8cfc04e42 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Fri, 8 Sep 2023 16:00:37 -0500 Subject: [PATCH 007/182] formatting --- element_facemap/facial_pose_model.py | 150 +++------------------------ 1 file changed, 17 insertions(+), 133 deletions(-) diff --git a/element_facemap/facial_pose_model.py b/element_facemap/facial_pose_model.py index 10166d0..0489739 100644 --- a/element_facemap/facial_pose_model.py +++ b/element_facemap/facial_pose_model.py @@ -143,7 +143,7 @@ class File(dj.Part): definition = """ -> master --- - model_file: attach # model file attachment + model_file: attach # model file attachment """ @@ -191,104 +191,6 @@ def infer_output_dir(self, key, relative=True, mkdir=True): return output_dir.relative_to(processed_dir) if relative else output_dir -@schema -class FacemapProcessing(dj.Computed): - """Automated table to run Facemap with inputs from FacemapTask. - - Attributes: - FacemapTask (foreign key) : Primary key for FacemapTask table. - processing_time (datetime) : Time of generation of the facemap results. - package_version ( varchar(16), optional) : Facemap package version. - """ - - definition = """ - # Processing Procedure - -> FacemapPoseEstimationTask - --- - processing_time : datetime # time of generation of the facemap results - package_version='' : varchar(16) - """ - - # Process only the VideoRecordings that have their Info inserted. 
- @property - def key_source(self): - """Limits the population of FacemapProcessing to those that have VideoRecording.File defined.""" - return FacemapPoseEstimationTask & fbe.VideoRecording.File - - def make(self, key): - """Runs Facemap""" - - task_mode = (FacemapPoseEstimationTask & key).fetch1("task_mode") - - output_dir = (FacemapPoseEstimationTask & key).fetch1("facemap_output_dir") - if not output_dir: - output_dir = FacemapPoseEstimationTask().infer_output_dir( - key, relative=True, mkdir=True - ) - # update processing_output_dir - FacemapPoseEstimationTask.update1( - {**key, "facemap_output_dir": output_dir.as_posix()} - ) - - if task_mode == "trigger": - from facemap import utils - from facemap.pose import refine_pose, model_loader, pose as facemap_pose - - params = (FacemapPoseEstimationTask & key).fetch1("facemap_params") - - video_files = ( - FacemapPoseEstimationTask * fbe.VideoRecording.File & key - ).fetch("file_path") - video_files = [ - [ - find_full_path(get_facemap_root_data_dir(), video_file).as_posix() - for video_file in video_files - ] - ] - # Processing performed using externally trained deep learning models - - # MAKE THIS A FULL PATH USING FIND_FULL_PATH - # facemap_model_path = (FacemapModel.File & f'model_name like "{params["model_name"]}"').fetch('file_path') - - # Model Name of interest should be specified by user during facemap task params manual update - model_name = (FacemapPoseEstimationTask & key).fetch("model_name") - # Fetches file attachment - facemap_model_path = ( - FacemapModel.File & f'model_name="{model_name}"' - ).fetch("file") - # move this "facemap_model_path" to the facemap model root directory - models_root_dir = model_loader.get_models_dir() - import shutil - - shutil.copy( - find_full_path(get_facemap_root_data_dir(), facemap_model_path), - models_root_dir, - ) - - # Instantiate Pose object, with filenames specified as video files, and bounding specified in params - # Assumes GUI to be none as we are running CLI implementation - pose = facemap_pose.Pose( - filenames=video_files, - bbox=(FacemapPoseEstimationTask & key).fetch1( - "bbox" - ), # should be manually inserted into params via jupyter notebook cell - bbox_set=(FacemapPoseEstimationTask & key).fetch1("bbox_set"), - gui=None, - GUIobject=None, - model_name=facemap_model_path.stem, - output_dir=output_dir, - ) - pose.run() - - keypoints_filepath = find_full_path(get_facemap_root_data_dir(), output_dir) - keypoints_data = utils.load_keypoints() - - _, creation_time = get_loader_result(key, FacemapTask) - key = {**key, "processing_time": creation_time} - - self.insert1(key) - - @schema class FacemapPoseEstimation(dj.Computed): """Results of facemap pose estimation @@ -343,8 +245,7 @@ def make(self, key): # - model_path: full path to the directory containing the trained model # - video_filepaths: full paths to the video files for inference # - analyze_video_params: optional parameters to analyze video - - from facemap.process import run as facemap_run + from facemap.pose import pose as facemap_pose, model_loader from facemap import utils bbox = (FacemapPoseEstimationTask & key).fetch1("bbox") @@ -352,6 +253,7 @@ def make(self, key): video_files = ( FacemapPoseEstimationTask * fbe.VideoRecording.File & key ).fetch("file_path") + video_files = [ [ find_full_path( @@ -370,18 +272,18 @@ def make(self, key): # move this "facemap_model_path" to the facemap model root directory models_root_dir = model_loader.get_models_dir() - import shutil - - facemap_model_path.copy(facemap_model_path, 
models_root_dir) model_output_path = Path(models_root_dir) / facemap_model_name - # copy using pathlib - model_output_path.write_text(facemap_model_path.read_text()) + # import shutil + # shutil.copy(facemap_model_path, models_root_dir) + + # copy using pathlib (validate that model can still be loaded by pytorch) + model_output_path.write_bytes(facemap_model_path.read_bytes()) # Processing performed using externally trained deep learning models - from facemap.pose import pose as facemap_pose, model_loader # Instantiate Pose object, with filenames specified as video files, and bounding specified in params # Assumes GUI to be none as we are running CLI implementation + pose = facemap_pose.Pose( filenames=video_files, bbox=bbox, # should be manually inserted into params via jupyter notebook cell @@ -393,7 +295,7 @@ def make(self, key): # look into facemap naming function - facemap_result_path = next(output_dir.glob("*.h5")) + facemap_result_path = next(output_dir.glob("*{}.h5")) # only 1 .h5 model output full_metadata_path = next(output_dir.glob("*.pkl")) @@ -402,45 +304,27 @@ def make(self, key): with open(full_metadata_path, "rb") as f: metadata = pickle.load(f) - facemap_result = utils.load_keypoints( + keypoints_data = utils.load_keypoints( metadata["bodyparts"], facemap_result_path ) # facemap_result is a 3D nested array with D1 - (x,y likelihood) D2 - bodyparts D3 - frame count # body parts are ordered the same way as stored - keypoints_data = utils.load_keypoints( - refine_pose.BODYPARTS, keypoints_filepath - ) - pose_x_coord = keypoints_data[0, :, :] # (bodyparts, frames) pose_y_coord = keypoints_data[1, :, :] # (bodyparts, frames) pose_likelihood = keypoints_data[2, :, :] # (bodyparts, frames) - for b_idx, bodypart in enumerate(refine_pose.BODYPARTS): - entry = { + for b_idx, bodypart in enumerate(metadata["bodyparts"]): + body_part_position_entry = { "body_part": bodypart, "x_pos": pose_x_coord[b_idx], "y_pos": pose_y_coord[b_idx], "likelihood": pose_likelihood[b_idx], } - # body_parts = [ - # { - # **key, - # "body_part": metadata["bodyparts"][y_], - # "frame_index": facemap_result[x_][y_][:], - # "x_pos": facemap_result[0][y_][likelihood_], - # "y_pos": facemap_result[1][y_][likelihood_], - # "likelihood": facemap_result[2][y_][likelihood_], - # } - # for x_ in range(facemap_result.shape[0]) - # for in range(facemap_result.shape[1]) - # for likelihood_ in range(facemap_result.shape[2]) - # ] - - creation_time = datetime.fromtimestamp(dlc_result.creation_time).strftime( - "%Y-%m-%d %H:%M:%S" - ) + creation_time = datetime.fromtimestamp( + full_metadata_path.stat().st_mtime + ).strftime("%Y-%m-%d %H:%M:%S") self.insert1({**key, "pose_estimation_time": creation_time}) - self.BodyPartPosition.insert(body_parts) + self.BodyPartPosition.insert(body_part_position_entry) From 81c4ebe9a811179f1de7ffe9fe04a2f7f9655ec0 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Fri, 8 Sep 2023 16:33:41 -0500 Subject: [PATCH 008/182] revert fbe changes --- element_facemap/facial_behavior_estimation.py | 180 ++---------------- 1 file changed, 13 insertions(+), 167 deletions(-) diff --git a/element_facemap/facial_behavior_estimation.py b/element_facemap/facial_behavior_estimation.py index fb44f1c..bce3907 100644 --- a/element_facemap/facial_behavior_estimation.py +++ b/element_facemap/facial_behavior_estimation.py @@ -10,12 +10,6 @@ import numpy as np from element_interface.utils import find_full_path, find_root_directory -import torch -import os -import h5py -import pickle - - schema = dj.schema() 
_linking_module = None @@ -130,7 +124,7 @@ class VideoRecording(dj.Manual): Attributes: Session (foreign key) : Primary key for Session table. - recording_id (int) : Recording identification number. + recording_id (int) : Recording ID. Device (foreign key) : Primary key for Device table. """ @@ -147,7 +141,7 @@ class File(dj.Part): Attributes: master (foreign key) : Primary key for VideoRecording table. - file_id (smallint) : File identification number. + file_id (smallint) : File ID. file_path ( varchar(255) ) : Filepath of video, relative to root directory. """ @@ -159,68 +153,6 @@ class File(dj.Part): """ -@schema -class BodyPart(dj.Lookup): - """Cumulative list of all body parts tracked by all facemap models (is this necessary?) - - Attributes: - body_part ( varchar(32) ): Body part short name. - body_part_description ( varchar(1000),optional ): Full description - - """ - - definition = """ - body_part : varchar(32) - --- - body_part_description='' : varchar(1000) - """ - - @classmethod - def extract_new_body_parts(cls, ): - # TODO - -@schema -class FacemapModel(dj.Manual): - """Trained Models stored for facial pose inference - - Attributes: - model_id(int) : Count of models inserted - model_name( varchar(64) ): Name of model, filepath.stem - """ - - definition = """ - model_id : int # model index, if multiple models - model_name : varchar(64) # name of model - """ - class BodyPart(dj.Part): - """Body parts associated with a given model - - Attributes: - body_part ( varchar(32) ): Body part name, (location specfication) - body_part_description ( varchar(1000) ): Optional. Longer description.""" - - definition = """ - -> master - body_part: varchar(32) - body_part_description: varchar(255) - """ - - class File(dj.Part): - """Relative paths of facemap models with respect to facemap_root_data_dir - - Attributes: - FacemapModel (foreign key): Facemap model primary key. - file_path ( varchar(255) ): filepath of facemap model, relative to root data dir - """ - - definition = """ - -> master - file_id: int - --- - file_path: varchar(255) # model filepath, relative to root data dir - """ - - @schema class RecordingInfo(dj.Imported): """Information extracted from video file. @@ -288,7 +220,6 @@ def make(self, key): ) - @schema class FacemapTask(dj.Manual): """Staging table for pairing of recording and Facemap parameters before processing. @@ -316,7 +247,6 @@ class FacemapTask(dj.Manual): do_mot_svd=1 : bool do_mov_svd=0 : bool task_description='' : varchar(128) - facemap_model_name='' : varchar(32) """ def infer_output_dir(self, key, relative=True, mkdir=True): @@ -335,6 +265,7 @@ def infer_output_dir(self, key, relative=True, mkdir=True): return output_dir.relative_to(processed_dir) if relative else output_dir + @schema class FacemapProcessing(dj.Computed): """Automated table to run Facemap with inputs from FacemapTask. 
@@ -382,101 +313,16 @@ def make(self, key): for video_file in video_files ] ] - # Processing performed using SVD (original facemap) - if params["trigger_mode"] == "SVD": - output_dir = find_full_path(get_facemap_root_data_dir(), output_dir) - facemap_run( - video_files, - sbin=params["sbin"], - proc=params, - savepath=output_dir.as_posix(), - motSVD=params["motSVD"], - movSVD=params["movSVD"], - ) - # Processing performed using externally trained deep learning models - elif params["trigger_mode"] == "POSE": - from facemap.pose import facemap_pose, facemap_network - - facemap_model = (FacemapModel.File & f'model_name like "{params["model_name"]}"').fetch('file') - facemap_model_path = (FacemapModel.File & f'model_name like "{params["model_name"]}"').fetch('file_path') - - # Instantiate Pose object, with filenames specified as video files, and bounding specified in params - # Assumes GUI to be none - pose = facemap_pose.Pose( - filenames=video_files, - bbox=params["bbox"], - gui=None, - GUIobject=None, - ) - - # Set model name to model path, so that torch can load the model - pose.model_name = facemap_model - print("Loading model state from:", self.model_name) - pose.net.load_state_dict(torch.load(self.model_name)) - pose.net.to(pose.device) - - # Load model - model_params = torch.load(facemap_model, map_location=self.device) - channels = model_params["params"]["channels"] - kernel_size = 3 - nout = len(self.bodyparts) # number of outputs from the model - self.net = facemap_network.FMnet( - img_ch=1, - output_ch=nout, - labels_id=self.bodyparts, - channels=channels, - kernel=kernel_size, - device=self.device, - ) - - # Pose prediction setup - if not self.bbox_set: - for i in range(len(self.Ly)): - x1, x2, y1, y2 = 0, self.Ly[i], 0, self.Lx[i] - self.bbox.append([x1, x2, y1, y2]) - - # Update resize and add padding flags - if x2 - x1 != y2 - y1: # if not a square frame view then add padding - self.add_padding = True - if x2 - x1 != 256 or y2 - y1 != 256: # if not 256x256 then resize - self.resize = True - self.bbox_set = True - - # Run model inference, i.e. predict landmarks (xlabels, ylabels, likelihood) - for video_id in range(len(self.filenames[0])): - print("\nProcessing video: {}".format(self.filenames[0][video_id])) - pred_data, metadata = self.predict_landmarks(video_id) - - data = pred_data.cpu().numpy() - # Save model as hdf5 file - # Create a multi-index dict to store data in HDF5 file. 
First index is the scorer name, second index is the bodypart names, and third index is the coordinates (x, y, likelihood) - scorer = "Facemap" - bodyparts = self.bodyparts - data_dict = {} - data_dict[scorer] = {} - if params['selected_frame_ind'] is None: - indices = np.arange(self.cumframes[-1]) - else: - indices = params['selected_frame_ind'] - for index, bodypart in enumerate(bodyparts): - data_dict[scorer][bodypart] = {} - data_dict[scorer][bodypart]["x"] = data[:, index, 0][indices] - data_dict[scorer][bodypart]["y"] = data[:, index, 1][indices] - data_dict[scorer][bodypart]["likelihood"] = data[:, index, 2][indices] - - - basename, filename = os.path.split(self.filenames[0][video_id]) - videoname, _ = os.path.splitext(filename) - hdf5_filepath = os.path.join(basename, videoname + "_FacemapPose.h5") - with h5py.File(hdf5_filepath, "w") as f: - self.save_dict_to_hdf5(f, facemap_model_path.parent, data_dict) - - # Save metadata to a pickle file - metadata_file = os.path.splitext(output_dir)[0] + "_metadata.pkl" - with open(metadata_file, "wb") as f: - pickle.dump(metadata, f, pickle.HIGHEST_PROTOCOL) - print("Saved metadata:", metadata_file) + output_dir = find_full_path(get_facemap_root_data_dir(), output_dir) + facemap_run( + video_files, + sbin=params["sbin"], + proc=params, + savepath=output_dir.as_posix(), + motSVD=params["motSVD"], + movSVD=params["movSVD"], + ) _, creation_time = get_loader_result(key, FacemapTask) key = {**key, "processing_time": creation_time} @@ -492,7 +338,7 @@ class FacialSignal(dj.Imported): FacemapProcessing (foreign key) : Primary key for FacemapProcessing table. """ - definition = """ # Facemap results + definition = """# Facemap results -> FacemapProcessing """ From 9a4265aa012acc7383e6813666b073623a9aabc3 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Thu, 14 Sep 2023 15:53:49 -0500 Subject: [PATCH 009/182] change facemap_output_dir to pose_estimation_output_dir --- element_facemap/facial_pose_model.py | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/element_facemap/facial_pose_model.py b/element_facemap/facial_pose_model.py index 0489739..7ed5979 100644 --- a/element_facemap/facial_pose_model.py +++ b/element_facemap/facial_pose_model.py @@ -89,7 +89,7 @@ def activate( # ----------------------------- Table declarations ---------------------- @schema class BodyPart(dj.Lookup): - """Body parts tracked by DeepLabCut models + """Body parts tracked by Facemap models Attributes: body_part ( varchar(32) ): Body part short name. @@ -154,12 +154,10 @@ class FacemapPoseEstimationTask(dj.Manual): Attributes: fbe.VideoRecording (foreign key) : Primary key for VideoRecording table. FacemapModel (foreign key) : Primary key for the facemap model table - facemap_task_id (smallint) : Facemap task ID - facemap_output_dir ( varchar(255), optional) : output dir storing the results - of Facemap analysis. - task_mode (enum) : Default load. Load or trigger analysis. + pose_estimation_output_dir ( varchar(255), optional) : output dir storing the results + of pose estimation analysis. + task_mode (enum) : Default trigger. Load or trigger analysis. bbox (longblob) : Bounding box for cropping the video [x1, x2, y1, y2]. If not set, entire frame is used. - bbox_set (bool) : True if bbox is set, False if not set. task_description ( varchar(128), optional) : Task description. 
""" @@ -168,10 +166,10 @@ class FacemapPoseEstimationTask(dj.Manual): -> fbe.VideoRecording -> FacemapModel --- - facemap_output_dir='' : varchar(255) # output directory - storing the results of Facemap analysis - task_mode='trigger' : enum('load', 'trigger') - bbox=null : longblob # list containing bounding box for cropping the video [x1, x2, y1, y2] - task_description='' : varchar(128) + pose_estimation_output_dir='' : varchar(255) # output dir - stores results of Facemap Pose estimation analysis + task_mode='trigger' : enum('load', 'trigger') + bbox=null : longblob # list containing bounding box for cropping the video [x1, x2, y1, y2] + task_description='' : varchar(128) """ def infer_output_dir(self, key, relative=True, mkdir=True): From 0ceb9a982c582f1dcb7eac2fb0861644f8d3c782 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Thu, 14 Sep 2023 16:58:56 -0500 Subject: [PATCH 010/182] change idx from model_name to model_id --- element_facemap/facial_pose_model.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/element_facemap/facial_pose_model.py b/element_facemap/facial_pose_model.py index 7ed5979..8a443c8 100644 --- a/element_facemap/facial_pose_model.py +++ b/element_facemap/facial_pose_model.py @@ -261,11 +261,11 @@ def make(self, key): ] ] # Model Name of interest should be specified by user during facemap task params manual update - model_name = (FacemapPoseEstimationTask & key).fetch("model_name") + model_id = (FacemapPoseEstimationTask & key).fetch("model_id") # Fetches file attachment - facemap_model_name = ( - FacemapModel.File & f'model_name="{model_name}"' - ).fetch("file") + facemap_model_name = (FacemapModel.File & f'model_id="{model_id}"').fetch( + "file" + ) facemap_model_path = Path.cwd() / facemap_model_name # move this "facemap_model_path" to the facemap model root directory From d390804ba754bb70e5756aeec549b1430ad9ed91 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Thu, 14 Sep 2023 17:09:43 -0500 Subject: [PATCH 011/182] fetch model_file --- element_facemap/facial_pose_model.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/element_facemap/facial_pose_model.py b/element_facemap/facial_pose_model.py index 8a443c8..1a53137 100644 --- a/element_facemap/facial_pose_model.py +++ b/element_facemap/facial_pose_model.py @@ -264,7 +264,7 @@ def make(self, key): model_id = (FacemapPoseEstimationTask & key).fetch("model_id") # Fetches file attachment facemap_model_name = (FacemapModel.File & f'model_id="{model_id}"').fetch( - "file" + "model_file" ) facemap_model_path = Path.cwd() / facemap_model_name From 47b4501784ca75f8be48d835b3b3b7569e751802 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Thu, 14 Sep 2023 17:20:27 -0500 Subject: [PATCH 012/182] change to fetch1 --- element_facemap/facial_pose_model.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/element_facemap/facial_pose_model.py b/element_facemap/facial_pose_model.py index 1a53137..56d3fc1 100644 --- a/element_facemap/facial_pose_model.py +++ b/element_facemap/facial_pose_model.py @@ -263,7 +263,7 @@ def make(self, key): # Model Name of interest should be specified by user during facemap task params manual update model_id = (FacemapPoseEstimationTask & key).fetch("model_id") # Fetches file attachment - facemap_model_name = (FacemapModel.File & f'model_id="{model_id}"').fetch( + facemap_model_name = (FacemapModel.File & f'model_id="{model_id}"').fetch1( "model_file" ) facemap_model_path = Path.cwd() / facemap_model_name From 
6c09b00c94e90d64013d7c31064ffd21a962feef Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Thu, 14 Sep 2023 17:43:12 -0500 Subject: [PATCH 013/182] copy facemap output to specified output dir --- element_facemap/facial_pose_model.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/element_facemap/facial_pose_model.py b/element_facemap/facial_pose_model.py index 56d3fc1..040816c 100644 --- a/element_facemap/facial_pose_model.py +++ b/element_facemap/facial_pose_model.py @@ -287,16 +287,19 @@ def make(self, key): bbox=bbox, # should be manually inserted into params via jupyter notebook cell bbox_set=bool(bbox), model_name=facemap_model_path.stem, - output_dir=output_dir, ) pose.run() # look into facemap naming function - - facemap_result_path = next(output_dir.glob("*{}.h5")) + facemap_output_dir = Path.cwd() + facemap_result_path = next(facemap_output_dir.glob("*{}.h5")) # only 1 .h5 model output - full_metadata_path = next(output_dir.glob("*.pkl")) + full_metadata_path = next(facemap_output_dir.glob("*.pkl")) + + # copy local facemap output to output directory + facemap_result_path.write_bytes(output_dir.read_bytes()) + full_metadata_path.write_bytes(output_dir.read_bytes()) # only 1 metadata.pkl inference output with open(full_metadata_path, "rb") as f: From 7eb1278faeff9943d2102721c41d9e62a79cc256 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Thu, 14 Sep 2023 17:45:10 -0500 Subject: [PATCH 014/182] use vid name to specify output model and metadata files --- element_facemap/facial_pose_model.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/element_facemap/facial_pose_model.py b/element_facemap/facial_pose_model.py index 040816c..a465d5a 100644 --- a/element_facemap/facial_pose_model.py +++ b/element_facemap/facial_pose_model.py @@ -260,6 +260,7 @@ def make(self, key): for video_file in video_files ] ] + vid_name = Path(video_files[0]).stem # Model Name of interest should be specified by user during facemap task params manual update model_id = (FacemapPoseEstimationTask & key).fetch("model_id") # Fetches file attachment @@ -289,13 +290,12 @@ def make(self, key): model_name=facemap_model_path.stem, ) pose.run() - + video_files[0] # look into facemap naming function - facemap_output_dir = Path.cwd() - facemap_result_path = next(facemap_output_dir.glob("*{}.h5")) + facemap_result_path = next(model_output_path.glob(f"*{vid_name}*.h5")) # only 1 .h5 model output - full_metadata_path = next(facemap_output_dir.glob("*.pkl")) + full_metadata_path = next(model_output_path.glob(f"*{vid_name}*.pkl")) # copy local facemap output to output directory facemap_result_path.write_bytes(output_dir.read_bytes()) From 9c958fd2e529db7668d5ef05a5b5efcaf9a71a08 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Thu, 14 Sep 2023 17:57:07 -0500 Subject: [PATCH 015/182] fix nested array --- element_facemap/facial_pose_model.py | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/element_facemap/facial_pose_model.py b/element_facemap/facial_pose_model.py index a465d5a..d755657 100644 --- a/element_facemap/facial_pose_model.py +++ b/element_facemap/facial_pose_model.py @@ -253,12 +253,8 @@ def make(self, key): ).fetch("file_path") video_files = [ - [ - find_full_path( - fbe.get_facemap_root_data_dir(), video_file - ).as_posix() - for video_file in video_files - ] + find_full_path(fbe.get_facemap_root_data_dir(), video_file).as_posix() + for video_file in video_files ] vid_name = Path(video_files[0]).stem # Model Name of 
interest should be specified by user during facemap task params manual update From 9f147e979a9d51a61c651b4e9cc3a5451a976e91 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Thu, 14 Sep 2023 18:05:53 -0500 Subject: [PATCH 016/182] remove bbox from pose object definition --- element_facemap/facial_pose_model.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/element_facemap/facial_pose_model.py b/element_facemap/facial_pose_model.py index d755657..c8c603b 100644 --- a/element_facemap/facial_pose_model.py +++ b/element_facemap/facial_pose_model.py @@ -281,12 +281,10 @@ def make(self, key): pose = facemap_pose.Pose( filenames=video_files, - bbox=bbox, # should be manually inserted into params via jupyter notebook cell - bbox_set=bool(bbox), model_name=facemap_model_path.stem, ) pose.run() - video_files[0] + # look into facemap naming function facemap_result_path = next(model_output_path.glob(f"*{vid_name}*.h5")) From bba5f09a4dad6b7e63832f2217bcb55334195229 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Fri, 15 Sep 2023 12:28:34 -0500 Subject: [PATCH 017/182] add test video files for debugging --- element_facemap/facial_pose_model.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/element_facemap/facial_pose_model.py b/element_facemap/facial_pose_model.py index c8c603b..97222c6 100644 --- a/element_facemap/facial_pose_model.py +++ b/element_facemap/facial_pose_model.py @@ -278,9 +278,9 @@ def make(self, key): # Instantiate Pose object, with filenames specified as video files, and bounding specified in params # Assumes GUI to be none as we are running CLI implementation - + test_video_files = list(Path.cwd().glob("*.mp4")) pose = facemap_pose.Pose( - filenames=video_files, + filenames=test_video_files, model_name=facemap_model_path.stem, ) pose.run() From 47285b9fef161e18932f52b0386a135dfafbc870 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Fri, 15 Sep 2023 12:32:45 -0500 Subject: [PATCH 018/182] save files as posix --- element_facemap/facial_pose_model.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/element_facemap/facial_pose_model.py b/element_facemap/facial_pose_model.py index 97222c6..841d13b 100644 --- a/element_facemap/facial_pose_model.py +++ b/element_facemap/facial_pose_model.py @@ -278,7 +278,7 @@ def make(self, key): # Instantiate Pose object, with filenames specified as video files, and bounding specified in params # Assumes GUI to be none as we are running CLI implementation - test_video_files = list(Path.cwd().glob("*.mp4")) + test_video_files = list(Path.cwd().glob("*.mp4").as_posix()) pose = facemap_pose.Pose( filenames=test_video_files, model_name=facemap_model_path.stem, From 7e0e7aca9ab60de6d76b25e853078b7e7ff1dc9f Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Fri, 15 Sep 2023 12:38:42 -0500 Subject: [PATCH 019/182] save vids as posix for insertion into pose object --- element_facemap/facial_pose_model.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/element_facemap/facial_pose_model.py b/element_facemap/facial_pose_model.py index 841d13b..2a14df1 100644 --- a/element_facemap/facial_pose_model.py +++ b/element_facemap/facial_pose_model.py @@ -278,9 +278,12 @@ def make(self, key): # Instantiate Pose object, with filenames specified as video files, and bounding specified in params # Assumes GUI to be none as we are running CLI implementation - test_video_files = list(Path.cwd().glob("*.mp4").as_posix()) + test_video_files = 
list(Path.cwd().glob("*.mp4")) + vid_files = [] + for vid in test_video_files: + vid_files.append(vid.as_posix()) pose = facemap_pose.Pose( - filenames=test_video_files, + filenames=vid_files, model_name=facemap_model_path.stem, ) pose.run() From 3e12992f9b272853a6c10286da7dd1b66fc3857c Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Fri, 15 Sep 2023 12:42:17 -0500 Subject: [PATCH 020/182] test using video_name instead of full path --- element_facemap/facial_pose_model.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/element_facemap/facial_pose_model.py b/element_facemap/facial_pose_model.py index 2a14df1..76bfbf9 100644 --- a/element_facemap/facial_pose_model.py +++ b/element_facemap/facial_pose_model.py @@ -281,7 +281,7 @@ def make(self, key): test_video_files = list(Path.cwd().glob("*.mp4")) vid_files = [] for vid in test_video_files: - vid_files.append(vid.as_posix()) + vid_files.append(vid.name) pose = facemap_pose.Pose( filenames=vid_files, model_name=facemap_model_path.stem, From 432226bad273ba63da8ccde29794323b925377fb Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Fri, 15 Sep 2023 12:44:54 -0500 Subject: [PATCH 021/182] revert to use full video path --- element_facemap/facial_pose_model.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/element_facemap/facial_pose_model.py b/element_facemap/facial_pose_model.py index 76bfbf9..2a14df1 100644 --- a/element_facemap/facial_pose_model.py +++ b/element_facemap/facial_pose_model.py @@ -281,7 +281,7 @@ def make(self, key): test_video_files = list(Path.cwd().glob("*.mp4")) vid_files = [] for vid in test_video_files: - vid_files.append(vid.name) + vid_files.append(vid.as_posix()) pose = facemap_pose.Pose( filenames=vid_files, model_name=facemap_model_path.stem, From dff20c4c2dbe1dcf0aa686bd71dd5f0a47f53152 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Fri, 15 Sep 2023 12:55:38 -0500 Subject: [PATCH 022/182] add list around filenames --- element_facemap/facial_pose_model.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/element_facemap/facial_pose_model.py b/element_facemap/facial_pose_model.py index 2a14df1..dee6bbf 100644 --- a/element_facemap/facial_pose_model.py +++ b/element_facemap/facial_pose_model.py @@ -283,7 +283,7 @@ def make(self, key): for vid in test_video_files: vid_files.append(vid.as_posix()) pose = facemap_pose.Pose( - filenames=vid_files, + filenames=[vid_files], model_name=facemap_model_path.stem, ) pose.run() From 93b67471eab25572f7674373164cb07ff216849d Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Fri, 15 Sep 2023 14:17:37 -0500 Subject: [PATCH 023/182] fix load from output .h5 --- element_facemap/facial_pose_model.py | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/element_facemap/facial_pose_model.py b/element_facemap/facial_pose_model.py index dee6bbf..d38cb88 100644 --- a/element_facemap/facial_pose_model.py +++ b/element_facemap/facial_pose_model.py @@ -263,7 +263,8 @@ def make(self, key): facemap_model_name = (FacemapModel.File & f'model_id="{model_id}"').fetch1( "model_file" ) - facemap_model_path = Path.cwd() / facemap_model_name + working_dir = Path.cwd() + facemap_model_path = working_dir / facemap_model_name # move this "facemap_model_path" to the facemap model root directory models_root_dir = model_loader.get_models_dir() @@ -287,12 +288,10 @@ def make(self, key): model_name=facemap_model_path.stem, ) pose.run() - - # look into facemap naming function - facemap_result_path = 
next(model_output_path.glob(f"*{vid_name}*.h5")) - - # only 1 .h5 model output - full_metadata_path = next(model_output_path.glob(f"*{vid_name}*.pkl")) + + # expect single .h5 model and .pkl metadata output + facemap_result_path = next(working_dir.glob(f"*{vid_name}*FacemapPose*.h5")) + full_metadata_path = next(working_dir.glob(f"*{vid_name}*FacemapPose*.pkl")) # copy local facemap output to output directory facemap_result_path.write_bytes(output_dir.read_bytes()) @@ -319,6 +318,8 @@ def make(self, key): "y_pos": pose_y_coord[b_idx], "likelihood": pose_likelihood[b_idx], } + elif task_mode == "load": + # Load externally processed facemap pose estimation results creation_time = datetime.fromtimestamp( full_metadata_path.stat().st_mtime From 8a035a886c505eb71da1cbe1e11147bbe01b952e Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Fri, 15 Sep 2023 14:24:28 -0500 Subject: [PATCH 024/182] formatting --- element_facemap/facial_pose_model.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/element_facemap/facial_pose_model.py b/element_facemap/facial_pose_model.py index d38cb88..40a2d8d 100644 --- a/element_facemap/facial_pose_model.py +++ b/element_facemap/facial_pose_model.py @@ -288,7 +288,7 @@ def make(self, key): model_name=facemap_model_path.stem, ) pose.run() - + # expect single .h5 model and .pkl metadata output facemap_result_path = next(working_dir.glob(f"*{vid_name}*FacemapPose*.h5")) full_metadata_path = next(working_dir.glob(f"*{vid_name}*FacemapPose*.pkl")) @@ -318,8 +318,6 @@ def make(self, key): "y_pos": pose_y_coord[b_idx], "likelihood": pose_likelihood[b_idx], } - elif task_mode == "load": - # Load externally processed facemap pose estimation results creation_time = datetime.fromtimestamp( full_metadata_path.stat().st_mtime From 609bc36a2a3f68abf6336c82bc935e1a889bf3a7 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Fri, 15 Sep 2023 17:00:48 -0500 Subject: [PATCH 025/182] switch back to video files specified in fbe.VR --- element_facemap/facial_pose_model.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/element_facemap/facial_pose_model.py b/element_facemap/facial_pose_model.py index 40a2d8d..d924f4e 100644 --- a/element_facemap/facial_pose_model.py +++ b/element_facemap/facial_pose_model.py @@ -279,12 +279,12 @@ def make(self, key): # Instantiate Pose object, with filenames specified as video files, and bounding specified in params # Assumes GUI to be none as we are running CLI implementation - test_video_files = list(Path.cwd().glob("*.mp4")) - vid_files = [] - for vid in test_video_files: - vid_files.append(vid.as_posix()) + # test_video_files = list(Path.cwd().glob("*.mp4")) + # vid_files = [] + # for vid in test_video_files: + # vid_files.append(vid.as_posix()) pose = facemap_pose.Pose( - filenames=[vid_files], + filenames=[video_files], model_name=facemap_model_path.stem, ) pose.run() From 7228d704e02a381990d31b64efec32ddf5d327a2 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Fri, 15 Sep 2023 17:12:40 -0500 Subject: [PATCH 026/182] change model output dir to fbe.VR video root directory --- element_facemap/facial_pose_model.py | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/element_facemap/facial_pose_model.py b/element_facemap/facial_pose_model.py index d924f4e..0015150 100644 --- a/element_facemap/facial_pose_model.py +++ b/element_facemap/facial_pose_model.py @@ -246,6 +246,8 @@ def make(self, key): from facemap.pose import pose as facemap_pose, model_loader from 
facemap import utils + facemap_video_root_data_dir = fbe.get_facemap_root_data_dir() + bbox = (FacemapPoseEstimationTask & key).fetch1("bbox") video_files = ( @@ -289,9 +291,14 @@ def make(self, key): ) pose.run() - # expect single .h5 model and .pkl metadata output - facemap_result_path = next(working_dir.glob(f"*{vid_name}*FacemapPose*.h5")) - full_metadata_path = next(working_dir.glob(f"*{vid_name}*FacemapPose*.pkl")) + # expect single .h5 model and .pkl metadata output in same directory that videos are stored + + facemap_result_path = next( + facemap_video_root_data_dir.glob(f"*{vid_name}*FacemapPose*.h5") + ) + full_metadata_path = next( + facemap_video_root_data_dir.glob(f"*{vid_name}*FacemapPose*.pkl") + ) # copy local facemap output to output directory facemap_result_path.write_bytes(output_dir.read_bytes()) From 689b37d489e0c4090eda30fba16b625224c3e032 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Sat, 16 Sep 2023 16:33:34 -0500 Subject: [PATCH 027/182] set output to load from video root dir --- element_facemap/facial_pose_model.py | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/element_facemap/facial_pose_model.py b/element_facemap/facial_pose_model.py index 0015150..bf4bd8b 100644 --- a/element_facemap/facial_pose_model.py +++ b/element_facemap/facial_pose_model.py @@ -246,8 +246,6 @@ def make(self, key): from facemap.pose import pose as facemap_pose, model_loader from facemap import utils - facemap_video_root_data_dir = fbe.get_facemap_root_data_dir() - bbox = (FacemapPoseEstimationTask & key).fetch1("bbox") video_files = ( @@ -259,6 +257,7 @@ def make(self, key): for video_file in video_files ] vid_name = Path(video_files[0]).stem + facemap_video_root_data_dir = Path(video_files[0]).parent # Model Name of interest should be specified by user during facemap task params manual update model_id = (FacemapPoseEstimationTask & key).fetch("model_id") # Fetches file attachment @@ -277,14 +276,8 @@ def make(self, key): # copy using pathlib (validate that model can still be loaded by pytorch) model_output_path.write_bytes(facemap_model_path.read_bytes()) - # Processing performed using externally trained deep learning models - # Instantiate Pose object, with filenames specified as video files, and bounding specified in params # Assumes GUI to be none as we are running CLI implementation - # test_video_files = list(Path.cwd().glob("*.mp4")) - # vid_files = [] - # for vid in test_video_files: - # vid_files.append(vid.as_posix()) pose = facemap_pose.Pose( filenames=[video_files], model_name=facemap_model_path.stem, From 9466322cad694e184c0c29b89d7fef62e8dc0e74 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Sun, 17 Sep 2023 19:13:01 -0500 Subject: [PATCH 028/182] use shutil to copy files --- element_facemap/facial_pose_model.py | 22 ++++++++++------------ 1 file changed, 10 insertions(+), 12 deletions(-) diff --git a/element_facemap/facial_pose_model.py b/element_facemap/facial_pose_model.py index bf4bd8b..acfafa4 100644 --- a/element_facemap/facial_pose_model.py +++ b/element_facemap/facial_pose_model.py @@ -2,9 +2,9 @@ import inspect from datetime import datetime from glob import glob +import pathlib3x as pathlib from pathlib import Path from typing import List, Tuple - import cv2 import datajoint as dj import numpy as np @@ -14,6 +14,7 @@ import os import h5py import pickle +import shutil from . 
import facial_behavior_estimation as fbe schema = dj.schema() @@ -177,7 +178,7 @@ def infer_output_dir(self, key, relative=True, mkdir=True): video_dir = find_full_path(fbe.get_facemap_root_data_dir(), video_file).parent root_dir = find_root_directory(fbe.get_facemap_root_data_dir(), video_dir) - paramset_key = (FacemapPoseEstimationTask & key).fetch1("facemap_task_id") + paramset_key = (FacemapPoseEstimationTask & key).fetch1("model_id") processed_dir = Path(fbe.get_facemap_processed_data_dir()) output_dir = ( processed_dir / video_dir.relative_to(root_dir) / f"facemap_{paramset_key}" @@ -260,21 +261,19 @@ def make(self, key): facemap_video_root_data_dir = Path(video_files[0]).parent # Model Name of interest should be specified by user during facemap task params manual update model_id = (FacemapPoseEstimationTask & key).fetch("model_id") - # Fetches file attachment + + # Fetches model(.pt) file attachment to present working directory facemap_model_name = (FacemapModel.File & f'model_id="{model_id}"').fetch1( "model_file" ) working_dir = Path.cwd() facemap_model_path = working_dir / facemap_model_name - # move this "facemap_model_path" to the facemap model root directory models_root_dir = model_loader.get_models_dir() - model_output_path = Path(models_root_dir) / facemap_model_name - # import shutil - # shutil.copy(facemap_model_path, models_root_dir) + # model_output_path = Path(models_root_dir) / facemap_model_name - # copy using pathlib (validate that model can still be loaded by pytorch) - model_output_path.write_bytes(facemap_model_path.read_bytes()) + # copy this model file to the facemap model root directory (~/.facemap/models/) + shutil.copy(facemap_model_path, models_root_dir) # Instantiate Pose object, with filenames specified as video files, and bounding specified in params # Assumes GUI to be none as we are running CLI implementation @@ -285,7 +284,6 @@ def make(self, key): pose.run() # expect single .h5 model and .pkl metadata output in same directory that videos are stored - facemap_result_path = next( facemap_video_root_data_dir.glob(f"*{vid_name}*FacemapPose*.h5") ) @@ -294,8 +292,8 @@ def make(self, key): ) # copy local facemap output to output directory - facemap_result_path.write_bytes(output_dir.read_bytes()) - full_metadata_path.write_bytes(output_dir.read_bytes()) + shutil.copy(facemap_result_path, output_dir) + shutil.copy(full_metadata_path, output_dir) # only 1 metadata.pkl inference output with open(full_metadata_path, "rb") as f: From 86640d23080c2fc0065a8e9f609af89a4c1a5d94 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Sun, 17 Sep 2023 19:18:23 -0500 Subject: [PATCH 029/182] remove pathlib3x from import --- element_facemap/facial_pose_model.py | 1 - 1 file changed, 1 deletion(-) diff --git a/element_facemap/facial_pose_model.py b/element_facemap/facial_pose_model.py index acfafa4..082e34c 100644 --- a/element_facemap/facial_pose_model.py +++ b/element_facemap/facial_pose_model.py @@ -2,7 +2,6 @@ import inspect from datetime import datetime from glob import glob -import pathlib3x as pathlib from pathlib import Path from typing import List, Tuple import cv2 From f50b063f40a6098e028c36f589ab02b2ce00290a Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Mon, 18 Sep 2023 10:23:45 -0500 Subject: [PATCH 030/182] update body_part insertion key --- element_facemap/facial_pose_model.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/element_facemap/facial_pose_model.py b/element_facemap/facial_pose_model.py index 
082e34c..b020351 100644 --- a/element_facemap/facial_pose_model.py +++ b/element_facemap/facial_pose_model.py @@ -319,6 +319,14 @@ def make(self, key): creation_time = datetime.fromtimestamp( full_metadata_path.stat().st_mtime ).strftime("%Y-%m-%d %H:%M:%S") - - self.insert1({**key, "pose_estimation_time": creation_time}) + inference_duration = metadata["total_frames"] * metadata["inference_speed"] + + self.insert1( + { + **key, + "pose_estimation_time": creation_time, + "pose_estimation_duration": inference_duration, + "total_frame_count": metadata["total_frames"], + } + ) self.BodyPartPosition.insert(body_part_position_entry) From 6a6941782ab95553a51644be232ebf4b308ba627 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Mon, 18 Sep 2023 15:23:59 -0500 Subject: [PATCH 031/182] refactor to load results before rerunning processing to solve lost transaction issue --- element_facemap/facial_pose_model.py | 104 +++++++++++++++++---------- 1 file changed, 65 insertions(+), 39 deletions(-) diff --git a/element_facemap/facial_pose_model.py b/element_facemap/facial_pose_model.py index b020351..497166b 100644 --- a/element_facemap/facial_pose_model.py +++ b/element_facemap/facial_pose_model.py @@ -236,6 +236,17 @@ def make(self, key): ) output_dir = find_full_path(fbe.get_facemap_root_data_dir(), output_dir) + video_files = ( + FacemapPoseEstimationTask * fbe.VideoRecording.File & key + ).fetch("file_path") + + video_files = [ + find_full_path(fbe.get_facemap_root_data_dir(), video_file).as_posix() + for video_file in video_files + ] + vid_name = Path(video_files[0]).stem + facemap_result_path = output_dir / f"{vid_name}_FacemapPose.h5" + full_metadata_path = output_dir / f"{vid_name}_FacemapPose_metadata.pkl" # Triger PoseEstimation if task_mode == "trigger": @@ -244,27 +255,35 @@ def make(self, key): # - video_filepaths: full paths to the video files for inference # - analyze_video_params: optional parameters to analyze video from facemap.pose import pose as facemap_pose, model_loader - from facemap import utils - bbox = (FacemapPoseEstimationTask & key).fetch1("bbox") + # check to see if output files have been created, if they have, load the output + + # think about file writing to inbox issue - video_files = ( - FacemapPoseEstimationTask * fbe.VideoRecording.File & key - ).fetch("file_path") + bbox = (FacemapPoseEstimationTask & key).fetch1("bbox") - video_files = [ - find_full_path(fbe.get_facemap_root_data_dir(), video_file).as_posix() - for video_file in video_files - ] - vid_name = Path(video_files[0]).stem facemap_video_root_data_dir = Path(video_files[0]).parent # Model Name of interest should be specified by user during facemap task params manual update model_id = (FacemapPoseEstimationTask & key).fetch("model_id") + + if facemap_result_path.exists() & full_metadata_path.exists(): # Load results and do not rerun processing + body_part_position_entry, inference_duration, total_frame_count, creation_time = _load_facemap_results(facemap_result_path, full_metadata_path) + self.insert1( + { + **key, + "pose_estimation_time": creation_time, + "pose_estimation_duration": inference_duration, + "total_frame_count": metadata["total_frames"], + } + ) + self.BodyPartPosition.insert(body_part_position_entry) + return # Fetches model(.pt) file attachment to present working directory facemap_model_name = (FacemapModel.File & f'model_id="{model_id}"').fetch1( "model_file" ) + working_dir = Path.cwd() facemap_model_path = working_dir / facemap_model_name @@ -284,43 +303,19 @@ def make(self, key): # 
expect single .h5 model and .pkl metadata output in same directory that videos are stored facemap_result_path = next( - facemap_video_root_data_dir.glob(f"*{vid_name}*FacemapPose*.h5") + facemap_video_root_data_dir.glob(f"*{vid_name}_FacemapPose.h5") ) full_metadata_path = next( - facemap_video_root_data_dir.glob(f"*{vid_name}*FacemapPose*.pkl") + facemap_video_root_data_dir.glob(f"*{vid_name}_FacemapPose_metadata.pkl") ) # copy local facemap output to output directory shutil.copy(facemap_result_path, output_dir) shutil.copy(full_metadata_path, output_dir) - # only 1 metadata.pkl inference output - with open(full_metadata_path, "rb") as f: - metadata = pickle.load(f) - - keypoints_data = utils.load_keypoints( - metadata["bodyparts"], facemap_result_path - ) - # facemap_result is a 3D nested array with D1 - (x,y likelihood) D2 - bodyparts D3 - frame count - # body parts are ordered the same way as stored - - pose_x_coord = keypoints_data[0, :, :] # (bodyparts, frames) - pose_y_coord = keypoints_data[1, :, :] # (bodyparts, frames) - pose_likelihood = keypoints_data[2, :, :] # (bodyparts, frames) - - for b_idx, bodypart in enumerate(metadata["bodyparts"]): - body_part_position_entry = { - "body_part": bodypart, - "x_pos": pose_x_coord[b_idx], - "y_pos": pose_y_coord[b_idx], - "likelihood": pose_likelihood[b_idx], - } - - creation_time = datetime.fromtimestamp( - full_metadata_path.stat().st_mtime - ).strftime("%Y-%m-%d %H:%M:%S") - inference_duration = metadata["total_frames"] * metadata["inference_speed"] - + body_part_position_entry, inference_duration, total_frame_count, creation_time = _load_facemap_results(facemap_result_path, full_metadata_path) + elif task_mode == "load": + self.insert1( { **key, @@ -330,3 +325,34 @@ def make(self, key): } ) self.BodyPartPosition.insert(body_part_position_entry) + + +def _load_facemap_results(facemap_result_path, full_metadata_path) + from facemap import utils + with open(full_metadata_path, "rb") as f: + metadata = pickle.load(f) + + keypoints_data = utils.load_keypoints( + metadata["bodyparts"], facemap_result_path + ) + # facemap_result is a 3D nested array with D1 - (x,y likelihood) D2 - bodyparts D3 - frame count + # body parts are ordered the same way as stored + + pose_x_coord = keypoints_data[0, :, :] # (bodyparts, frames) + pose_y_coord = keypoints_data[1, :, :] # (bodyparts, frames) + pose_likelihood = keypoints_data[2, :, :] # (bodyparts, frames) + + for b_idx, bodypart in enumerate(metadata["bodyparts"]): + body_part_position_entry = { + "body_part": bodypart, + "x_pos": pose_x_coord[b_idx], + "y_pos": pose_y_coord[b_idx], + "likelihood": pose_likelihood[b_idx], + } + + creation_time = datetime.fromtimestamp( + full_metadata_path.stat().st_mtime + ).strftime("%Y-%m-%d %H:%M:%S") + inference_duration = metadata["total_frames"] * metadata["inference_speed"] + + return body_part_position_entry, inference_duration, metadata["total_frames"], creation_time \ No newline at end of file From 8154e618d7a14e8daf870f84cd9feffaf079a3d6 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Mon, 18 Sep 2023 18:15:23 -0500 Subject: [PATCH 032/182] minor formatting --- element_facemap/facial_pose_model.py | 91 ++++++++++++++++++---------- 1 file changed, 58 insertions(+), 33 deletions(-) diff --git a/element_facemap/facial_pose_model.py b/element_facemap/facial_pose_model.py index 497166b..2828c9c 100644 --- a/element_facemap/facial_pose_model.py +++ b/element_facemap/facial_pose_model.py @@ -236,9 +236,9 @@ def make(self, key): ) output_dir = 
find_full_path(fbe.get_facemap_root_data_dir(), output_dir) - video_files = ( - FacemapPoseEstimationTask * fbe.VideoRecording.File & key - ).fetch("file_path") + video_files = (FacemapPoseEstimationTask * fbe.VideoRecording.File & key).fetch( + "file_path" + ) video_files = [ find_full_path(fbe.get_facemap_root_data_dir(), video_file).as_posix() @@ -248,26 +248,25 @@ def make(self, key): facemap_result_path = output_dir / f"{vid_name}_FacemapPose.h5" full_metadata_path = output_dir / f"{vid_name}_FacemapPose_metadata.pkl" - # Triger PoseEstimation + # Trigger Facemap Pose Estimation Inference if task_mode == "trigger": - # Triggering facemap for pose estimation required: + # Triggering facemap for pose estimation requires: # - model_path: full path to the directory containing the trained model # - video_filepaths: full paths to the video files for inference - # - analyze_video_params: optional parameters to analyze video - from facemap.pose import pose as facemap_pose, model_loader + # - analyze_video_params: optional parameters to analyze video (uses facemap default params) - # check to see if output files have been created, if they have, load the output - - # think about file writing to inbox issue - - bbox = (FacemapPoseEstimationTask & key).fetch1("bbox") + from facemap.pose import pose as facemap_pose, model_loader - facemap_video_root_data_dir = Path(video_files[0]).parent - # Model Name of interest should be specified by user during facemap task params manual update - model_id = (FacemapPoseEstimationTask & key).fetch("model_id") - - if facemap_result_path.exists() & full_metadata_path.exists(): # Load results and do not rerun processing - body_part_position_entry, inference_duration, total_frame_count, creation_time = _load_facemap_results(facemap_result_path, full_metadata_path) + # If output files have been created, load the output + if ( + facemap_result_path.exists() & full_metadata_path.exists() + ): # Load results and do not rerun processing + ( + body_part_position_entry, + inference_duration, + total_frame_count, + creation_time, + ) = _load_facemap_results(facemap_result_path, full_metadata_path) self.insert1( { **key, @@ -279,16 +278,22 @@ def make(self, key): self.BodyPartPosition.insert(body_part_position_entry) return - # Fetches model(.pt) file attachment to present working directory + # think about file writing to inbox issue + + bbox = (FacemapPoseEstimationTask & key).fetch1("bbox") + facemap_video_root_data_dir = Path(video_files[0]).parent + + # Model Name of interest should be specified by user during facemap task params manual update + model_id = (FacemapPoseEstimationTask & key).fetch("model_id") + + # Fetch model(.pt) file attachment to present working directory facemap_model_name = (FacemapModel.File & f'model_id="{model_id}"').fetch1( "model_file" ) working_dir = Path.cwd() facemap_model_path = working_dir / facemap_model_name - models_root_dir = model_loader.get_models_dir() - # model_output_path = Path(models_root_dir) / facemap_model_name # copy this model file to the facemap model root directory (~/.facemap/models/) shutil.copy(facemap_model_path, models_root_dir) @@ -306,16 +311,32 @@ def make(self, key): facemap_video_root_data_dir.glob(f"*{vid_name}_FacemapPose.h5") ) full_metadata_path = next( - facemap_video_root_data_dir.glob(f"*{vid_name}_FacemapPose_metadata.pkl") + facemap_video_root_data_dir.glob( + f"*{vid_name}_FacemapPose_metadata.pkl" + ) ) # copy local facemap output to output directory shutil.copy(facemap_result_path, output_dir) 
shutil.copy(full_metadata_path, output_dir) - body_part_position_entry, inference_duration, total_frame_count, creation_time = _load_facemap_results(facemap_result_path, full_metadata_path) + ( + body_part_position_entry, + inference_duration, + total_frame_count, + creation_time, + ) = _load_facemap_results(facemap_result_path, full_metadata_path) elif task_mode == "load": - + if ( + facemap_result_path.exists() & full_metadata_path.exists() + ): # Load preprocessed inference results + ( + body_part_position_entry, + inference_duration, + total_frame_count, + creation_time, + ) = _load_facemap_results(facemap_result_path, full_metadata_path) + self.insert1( { **key, @@ -327,14 +348,13 @@ def make(self, key): self.BodyPartPosition.insert(body_part_position_entry) -def _load_facemap_results(facemap_result_path, full_metadata_path) +def _load_facemap_results(facemap_result_path, full_metadata_path): from facemap import utils + with open(full_metadata_path, "rb") as f: - metadata = pickle.load(f) + metadata = pickle.load(f) - keypoints_data = utils.load_keypoints( - metadata["bodyparts"], facemap_result_path - ) + keypoints_data = utils.load_keypoints(metadata["bodyparts"], facemap_result_path) # facemap_result is a 3D nested array with D1 - (x,y likelihood) D2 - bodyparts D3 - frame count # body parts are ordered the same way as stored @@ -350,9 +370,14 @@ def _load_facemap_results(facemap_result_path, full_metadata_path) "likelihood": pose_likelihood[b_idx], } - creation_time = datetime.fromtimestamp( - full_metadata_path.stat().st_mtime - ).strftime("%Y-%m-%d %H:%M:%S") + creation_time = datetime.fromtimestamp(full_metadata_path.stat().st_mtime).strftime( + "%Y-%m-%d %H:%M:%S" + ) inference_duration = metadata["total_frames"] * metadata["inference_speed"] - return body_part_position_entry, inference_duration, metadata["total_frames"], creation_time \ No newline at end of file + return ( + body_part_position_entry, + inference_duration, + metadata["total_frames"], + creation_time, + ) From 1d8b8e5b994eee35d5c48077643062694c228790 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Mon, 18 Sep 2023 18:26:00 -0500 Subject: [PATCH 033/182] rename total_frame_count --- element_facemap/facial_pose_model.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/element_facemap/facial_pose_model.py b/element_facemap/facial_pose_model.py index 2828c9c..9f9c99e 100644 --- a/element_facemap/facial_pose_model.py +++ b/element_facemap/facial_pose_model.py @@ -272,7 +272,7 @@ def make(self, key): **key, "pose_estimation_time": creation_time, "pose_estimation_duration": inference_duration, - "total_frame_count": metadata["total_frames"], + "total_frame_count": total_frame_count, } ) self.BodyPartPosition.insert(body_part_position_entry) @@ -342,7 +342,7 @@ def make(self, key): **key, "pose_estimation_time": creation_time, "pose_estimation_duration": inference_duration, - "total_frame_count": metadata["total_frames"], + "total_frame_count": total_frame_count, } ) self.BodyPartPosition.insert(body_part_position_entry) From 7ffaeeaadfae372f836ac1236ca141a53a68a163 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Mon, 18 Sep 2023 18:37:16 -0500 Subject: [PATCH 034/182] set body_part to varchar(32) --- element_facemap/facial_pose_model.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/element_facemap/facial_pose_model.py b/element_facemap/facial_pose_model.py index 9f9c99e..02c0cc5 100644 --- a/element_facemap/facial_pose_model.py +++ 
b/element_facemap/facial_pose_model.py @@ -213,7 +213,7 @@ class BodyPartPosition(dj.Part): Attributes: PoseEstimation (foreign key): Pose Estimation key. FacemapModel.BodyPart (foreign key): Body Part key. - frame_index (longblob): Frame index in model. + body_part (longblob): Body part for positional likelihood x_pos (longblob): X position. y_pos (longblob): Y position. likelihood (longblob): Model confidence.""" @@ -222,10 +222,10 @@ class BodyPartPosition(dj.Part): -> master -> FacemapModel.BodyPart --- - frame_index : longblob # frame index in model - x_pos : longblob - y_pos : longblob - likelihood : longblob + body_part : varchar(32) # body part + x_pos : longblob # x position + y_pos : longblob # y position + likelihood : longblob # model evaluated likelihood """ def make(self, key): From fc58d149f6da6a2de587b5b4ee9474131d6c3ad1 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Tue, 19 Sep 2023 10:48:33 -0500 Subject: [PATCH 035/182] add key to bodyparposition entry --- element_facemap/facial_pose_model.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/element_facemap/facial_pose_model.py b/element_facemap/facial_pose_model.py index 02c0cc5..c6c8d2e 100644 --- a/element_facemap/facial_pose_model.py +++ b/element_facemap/facial_pose_model.py @@ -275,7 +275,7 @@ def make(self, key): "total_frame_count": total_frame_count, } ) - self.BodyPartPosition.insert(body_part_position_entry) + self.BodyPartPosition.insert(**key, body_part_position_entry) return # think about file writing to inbox issue @@ -345,7 +345,7 @@ def make(self, key): "total_frame_count": total_frame_count, } ) - self.BodyPartPosition.insert(body_part_position_entry) + self.BodyPartPosition.insert(**key, body_part_position_entry) def _load_facemap_results(facemap_result_path, full_metadata_path): From 270582134c5e429c07d12e6a94bd1b3f861ae906 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Tue, 19 Sep 2023 10:50:21 -0500 Subject: [PATCH 036/182] fix syntax --- element_facemap/facial_pose_model.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/element_facemap/facial_pose_model.py b/element_facemap/facial_pose_model.py index c6c8d2e..bccc2b9 100644 --- a/element_facemap/facial_pose_model.py +++ b/element_facemap/facial_pose_model.py @@ -275,7 +275,7 @@ def make(self, key): "total_frame_count": total_frame_count, } ) - self.BodyPartPosition.insert(**key, body_part_position_entry) + self.BodyPartPosition.insert({**key, body_part_position_entry}) return # think about file writing to inbox issue @@ -345,7 +345,7 @@ def make(self, key): "total_frame_count": total_frame_count, } ) - self.BodyPartPosition.insert(**key, body_part_position_entry) + self.BodyPartPosition.insert({**key, body_part_position_entry}) def _load_facemap_results(facemap_result_path, full_metadata_path): From 77ba578cebda6ad771242e2fc9ea5df2f156905c Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Tue, 19 Sep 2023 10:53:09 -0500 Subject: [PATCH 037/182] add key to each element of body_part_position_entry --- element_facemap/facial_pose_model.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/element_facemap/facial_pose_model.py b/element_facemap/facial_pose_model.py index bccc2b9..83b1842 100644 --- a/element_facemap/facial_pose_model.py +++ b/element_facemap/facial_pose_model.py @@ -266,7 +266,7 @@ def make(self, key): inference_duration, total_frame_count, creation_time, - ) = _load_facemap_results(facemap_result_path, full_metadata_path) + ) = 
_load_facemap_results(key, facemap_result_path, full_metadata_path) self.insert1( { **key, @@ -275,7 +275,7 @@ def make(self, key): "total_frame_count": total_frame_count, } ) - self.BodyPartPosition.insert({**key, body_part_position_entry}) + self.BodyPartPosition.insert(body_part_position_entry) return # think about file writing to inbox issue @@ -335,7 +335,7 @@ def make(self, key): inference_duration, total_frame_count, creation_time, - ) = _load_facemap_results(facemap_result_path, full_metadata_path) + ) = _load_facemap_results(key, facemap_result_path, full_metadata_path) self.insert1( { @@ -345,10 +345,10 @@ def make(self, key): "total_frame_count": total_frame_count, } ) - self.BodyPartPosition.insert({**key, body_part_position_entry}) + self.BodyPartPosition.insert(body_part_postion_entry) -def _load_facemap_results(facemap_result_path, full_metadata_path): +def _load_facemap_results(key, facemap_result_path, full_metadata_path): from facemap import utils with open(full_metadata_path, "rb") as f: @@ -364,6 +364,7 @@ def _load_facemap_results(facemap_result_path, full_metadata_path): for b_idx, bodypart in enumerate(metadata["bodyparts"]): body_part_position_entry = { + **key, "body_part": bodypart, "x_pos": pose_x_coord[b_idx], "y_pos": pose_y_coord[b_idx], From 4de0e0e853c74695202f34bf293f0b7abcd31bb5 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Tue, 19 Sep 2023 11:23:34 -0500 Subject: [PATCH 038/182] modify BodyPartPosition to inherit from BodyPart as opposed to FacemapModel.BodyPart --- element_facemap/facial_pose_model.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/element_facemap/facial_pose_model.py b/element_facemap/facial_pose_model.py index 83b1842..ca9db66 100644 --- a/element_facemap/facial_pose_model.py +++ b/element_facemap/facial_pose_model.py @@ -220,9 +220,8 @@ class BodyPartPosition(dj.Part): definition = """ # uses facemap h5 output for body part position -> master - -> FacemapModel.BodyPart + -> BodyPart --- - body_part : varchar(32) # body part x_pos : longblob # x position y_pos : longblob # y position likelihood : longblob # model evaluated likelihood @@ -363,6 +362,7 @@ def _load_facemap_results(key, facemap_result_path, full_metadata_path): pose_likelihood = keypoints_data[2, :, :] # (bodyparts, frames) for b_idx, bodypart in enumerate(metadata["bodyparts"]): + FacemapModel.BodyPart body_part_position_entry = { **key, "body_part": bodypart, From 0ac98d0ac12a6510f9dbe2185bccaed218dc5611 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Tue, 19 Sep 2023 11:26:23 -0500 Subject: [PATCH 039/182] add body_part_key to bodypart_pos_entry --- element_facemap/facial_pose_model.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/element_facemap/facial_pose_model.py b/element_facemap/facial_pose_model.py index ca9db66..68ad4b4 100644 --- a/element_facemap/facial_pose_model.py +++ b/element_facemap/facial_pose_model.py @@ -362,10 +362,10 @@ def _load_facemap_results(key, facemap_result_path, full_metadata_path): pose_likelihood = keypoints_data[2, :, :] # (bodyparts, frames) for b_idx, bodypart in enumerate(metadata["bodyparts"]): - FacemapModel.BodyPart + body_part_key = (BodyPart & f"body_part={bodypart}").fetch("KEY") body_part_position_entry = { **key, - "body_part": bodypart, + **body_part_key, "x_pos": pose_x_coord[b_idx], "y_pos": pose_y_coord[b_idx], "likelihood": pose_likelihood[b_idx], From 9c6d0539c6f621d7f9da520caecf0edc938b1683 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Tue, 19 
Sep 2023 11:31:17 -0500 Subject: [PATCH 040/182] switch to fetch1 --- element_facemap/facial_pose_model.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/element_facemap/facial_pose_model.py b/element_facemap/facial_pose_model.py index 68ad4b4..7b607cf 100644 --- a/element_facemap/facial_pose_model.py +++ b/element_facemap/facial_pose_model.py @@ -362,7 +362,7 @@ def _load_facemap_results(key, facemap_result_path, full_metadata_path): pose_likelihood = keypoints_data[2, :, :] # (bodyparts, frames) for b_idx, bodypart in enumerate(metadata["bodyparts"]): - body_part_key = (BodyPart & f"body_part={bodypart}").fetch("KEY") + body_part_key = (BodyPart & f"body_part='{bodypart}'").fetch1("KEY") body_part_position_entry = { **key, **body_part_key, From 45c82a744ec9f9f4438969cc19f84668c498d0b2 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Tue, 19 Sep 2023 11:53:27 -0500 Subject: [PATCH 041/182] inherit from FacemapModel.BodyPart --- element_facemap/facial_pose_model.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/element_facemap/facial_pose_model.py b/element_facemap/facial_pose_model.py index 7b607cf..eb01893 100644 --- a/element_facemap/facial_pose_model.py +++ b/element_facemap/facial_pose_model.py @@ -220,7 +220,7 @@ class BodyPartPosition(dj.Part): definition = """ # uses facemap h5 output for body part position -> master - -> BodyPart + -> FacemapModel.BodyPart --- x_pos : longblob # x position y_pos : longblob # y position @@ -362,10 +362,9 @@ def _load_facemap_results(key, facemap_result_path, full_metadata_path): pose_likelihood = keypoints_data[2, :, :] # (bodyparts, frames) for b_idx, bodypart in enumerate(metadata["bodyparts"]): - body_part_key = (BodyPart & f"body_part='{bodypart}'").fetch1("KEY") body_part_position_entry = { **key, - **body_part_key, + "body_part": bodypart, "x_pos": pose_x_coord[b_idx], "y_pos": pose_y_coord[b_idx], "likelihood": pose_likelihood[b_idx], From 130a7f6fd5d9681c8882766f4220b64c98546b1a Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Tue, 19 Sep 2023 12:16:35 -0500 Subject: [PATCH 042/182] modify body_position_entries --- element_facemap/facial_pose_model.py | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/element_facemap/facial_pose_model.py b/element_facemap/facial_pose_model.py index eb01893..22bf44a 100644 --- a/element_facemap/facial_pose_model.py +++ b/element_facemap/facial_pose_model.py @@ -360,15 +360,17 @@ def _load_facemap_results(key, facemap_result_path, full_metadata_path): pose_x_coord = keypoints_data[0, :, :] # (bodyparts, frames) pose_y_coord = keypoints_data[1, :, :] # (bodyparts, frames) pose_likelihood = keypoints_data[2, :, :] # (bodyparts, frames) - + body_part_position_entries = [] for b_idx, bodypart in enumerate(metadata["bodyparts"]): - body_part_position_entry = { - **key, - "body_part": bodypart, - "x_pos": pose_x_coord[b_idx], - "y_pos": pose_y_coord[b_idx], - "likelihood": pose_likelihood[b_idx], - } + body_part_position_entries.append( + { + **key, + "body_part": bodypart, + "x_pos": pose_x_coord[b_idx], + "y_pos": pose_y_coord[b_idx], + "likelihood": pose_likelihood[b_idx], + } + ) creation_time = datetime.fromtimestamp(full_metadata_path.stat().st_mtime).strftime( "%Y-%m-%d %H:%M:%S" @@ -376,7 +378,7 @@ def _load_facemap_results(key, facemap_result_path, full_metadata_path): inference_duration = metadata["total_frames"] * metadata["inference_speed"] return ( - body_part_position_entry, + body_part_position_entries, 
inference_duration, metadata["total_frames"], creation_time, From 2dbfc217d72f824c890e10b51d841481e731491f Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Tue, 19 Sep 2023 13:19:55 -0500 Subject: [PATCH 043/182] bugfix --- element_facemap/facial_pose_model.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/element_facemap/facial_pose_model.py b/element_facemap/facial_pose_model.py index 22bf44a..c39bb14 100644 --- a/element_facemap/facial_pose_model.py +++ b/element_facemap/facial_pose_model.py @@ -324,7 +324,7 @@ def make(self, key): inference_duration, total_frame_count, creation_time, - ) = _load_facemap_results(facemap_result_path, full_metadata_path) + ) = _load_facemap_results(key, facemap_result_path, full_metadata_path) elif task_mode == "load": if ( facemap_result_path.exists() & full_metadata_path.exists() @@ -360,6 +360,7 @@ def _load_facemap_results(key, facemap_result_path, full_metadata_path): pose_x_coord = keypoints_data[0, :, :] # (bodyparts, frames) pose_y_coord = keypoints_data[1, :, :] # (bodyparts, frames) pose_likelihood = keypoints_data[2, :, :] # (bodyparts, frames) + body_part_position_entries = [] for b_idx, bodypart in enumerate(metadata["bodyparts"]): body_part_position_entries.append( From d6e6259c2cf6e789d67496fab8778780df1b891e Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Tue, 19 Sep 2023 14:50:58 -0500 Subject: [PATCH 044/182] fix typo --- element_facemap/facial_pose_model.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/element_facemap/facial_pose_model.py b/element_facemap/facial_pose_model.py index c39bb14..1de9231 100644 --- a/element_facemap/facial_pose_model.py +++ b/element_facemap/facial_pose_model.py @@ -344,7 +344,7 @@ def make(self, key): "total_frame_count": total_frame_count, } ) - self.BodyPartPosition.insert(body_part_postion_entry) + self.BodyPartPosition.insert(body_part_position_entry) def _load_facemap_results(key, facemap_result_path, full_metadata_path): From 937ec94a4269dd330cfcdd4db47bb98d7416a8af Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Tue, 19 Sep 2023 18:09:29 -0500 Subject: [PATCH 045/182] add get_tracectory class method --- element_facemap/facial_pose_model.py | 36 ++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) diff --git a/element_facemap/facial_pose_model.py b/element_facemap/facial_pose_model.py index 1de9231..8853260 100644 --- a/element_facemap/facial_pose_model.py +++ b/element_facemap/facial_pose_model.py @@ -346,6 +346,42 @@ def make(self, key): ) self.BodyPartPosition.insert(body_part_position_entry) + @classmethod + def get_trajectory(cls, key: dict, body_parts: list = "all") -> pd.DataFrame: + """Returns a pandas dataframe of coordinates of the specified body_part(s) + + Args: + key (dict): A DataJoint query specifying one FacemapPoseEstimation entry. + body_parts (list, optional): Body parts as a list. If "all", all joints + + Returns: + df: multi index pandas dataframe with DLC scorer names, body_parts + and x/y coordinates of each joint name for a camera_id, similar to + output of DLC dataframe. 
If 2D, z is set of zeros + """ + model_name = (FacemapModel & f'model_id={key["model_id"]}').fetch("model_name") + + if body_parts == "all": + body_parts = (cls.BodyPartPosition & key).fetch("body_part") + elif not isinstance(body_parts, list): + body_parts = list(body_parts) + + df = None + for body_part in body_parts: + x_pos, y_pos, likelihood = ( + cls.BodyPartPosition & {"body_part": body_part} + ).fetch1("x_pos", "y_pos", "likelihood") + + a = np.vstack((x_pos, y_pos, likelihood)) + a = a.T + pdindex = pd.MultiIndex.from_product( + [[model_name], [body_part], ["x", "y", "likelihood"]], + names=["model", "bodyparts", "coords"], + ) + frame = pd.DataFrame(a, columns=pdindex, index=range(0, a.shape[0])) + df = pd.concat([df, frame], axis=1) + return df + def _load_facemap_results(key, facemap_result_path, full_metadata_path): from facemap import utils From 36dba466adeda64fb1dc47e3f9253aa1359de280 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Tue, 19 Sep 2023 18:11:04 -0500 Subject: [PATCH 046/182] add pandas import --- element_facemap/facial_pose_model.py | 1 + 1 file changed, 1 insertion(+) diff --git a/element_facemap/facial_pose_model.py b/element_facemap/facial_pose_model.py index 8853260..7eb4e74 100644 --- a/element_facemap/facial_pose_model.py +++ b/element_facemap/facial_pose_model.py @@ -7,6 +7,7 @@ import cv2 import datajoint as dj import numpy as np +import pandas as pd from element_interface.utils import find_full_path, find_root_directory import torch From c8ff304cfc2bf61c889f1a925e4a7371ce86d351 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Tue, 19 Sep 2023 18:35:24 -0500 Subject: [PATCH 047/182] query bodypartposition with recording id --- element_facemap/facial_pose_model.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/element_facemap/facial_pose_model.py b/element_facemap/facial_pose_model.py index 7eb4e74..def5ef9 100644 --- a/element_facemap/facial_pose_model.py +++ b/element_facemap/facial_pose_model.py @@ -370,7 +370,9 @@ def get_trajectory(cls, key: dict, body_parts: list = "all") -> pd.DataFrame: df = None for body_part in body_parts: x_pos, y_pos, likelihood = ( - cls.BodyPartPosition & {"body_part": body_part} + cls.BodyPartPosition + & {"body_part": body_part} + & {"recording_id": key["recording_id"]} ).fetch1("x_pos", "y_pos", "likelihood") a = np.vstack((x_pos, y_pos, likelihood)) From 196f934c82a6cdcb46151616508f909494ce3ad8 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Tue, 19 Sep 2023 18:46:50 -0500 Subject: [PATCH 048/182] fetch as_dict =True --- element_facemap/facial_pose_model.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/element_facemap/facial_pose_model.py b/element_facemap/facial_pose_model.py index def5ef9..b1f51ab 100644 --- a/element_facemap/facial_pose_model.py +++ b/element_facemap/facial_pose_model.py @@ -373,7 +373,7 @@ def get_trajectory(cls, key: dict, body_parts: list = "all") -> pd.DataFrame: cls.BodyPartPosition & {"body_part": body_part} & {"recording_id": key["recording_id"]} - ).fetch1("x_pos", "y_pos", "likelihood") + ).fetch1("x_pos", "y_pos", "likelihood", as_dict=True) a = np.vstack((x_pos, y_pos, likelihood)) a = a.T From 31216a04385108ec9ba453858ed4c5087be2beb7 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Tue, 19 Sep 2023 18:49:40 -0500 Subject: [PATCH 049/182] fetch1 to fetch --- element_facemap/facial_pose_model.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/element_facemap/facial_pose_model.py 
b/element_facemap/facial_pose_model.py index b1f51ab..f87a8a9 100644 --- a/element_facemap/facial_pose_model.py +++ b/element_facemap/facial_pose_model.py @@ -373,7 +373,7 @@ def get_trajectory(cls, key: dict, body_parts: list = "all") -> pd.DataFrame: cls.BodyPartPosition & {"body_part": body_part} & {"recording_id": key["recording_id"]} - ).fetch1("x_pos", "y_pos", "likelihood", as_dict=True) + ).fetch("x_pos", "y_pos", "likelihood", as_dict=True) a = np.vstack((x_pos, y_pos, likelihood)) a = a.T From 1c49ead20bed363879271109a6e82fec8b2fbbb1 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Tue, 19 Sep 2023 19:02:01 -0500 Subject: [PATCH 050/182] modify fetching of results --- element_facemap/facial_pose_model.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/element_facemap/facial_pose_model.py b/element_facemap/facial_pose_model.py index f87a8a9..1e564ae 100644 --- a/element_facemap/facial_pose_model.py +++ b/element_facemap/facial_pose_model.py @@ -369,12 +369,15 @@ def get_trajectory(cls, key: dict, body_parts: list = "all") -> pd.DataFrame: df = None for body_part in body_parts: - x_pos, y_pos, likelihood = ( + result_dict = ( cls.BodyPartPosition & {"body_part": body_part} & {"recording_id": key["recording_id"]} - ).fetch("x_pos", "y_pos", "likelihood", as_dict=True) - + & {"session_id": key["session_id"]} + ).fetch("x_pos", "y_pos", "likelihood", as_dict=True)[0] + x_pos = result_dict["x_pos"] + y_pos = result_dict["y_pos"] + likelihood = result_dict["likelihood"] a = np.vstack((x_pos, y_pos, likelihood)) a = a.T pdindex = pd.MultiIndex.from_product( From 4348c60c2fb7ce75ed01ec6b3784784f31555910 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Tue, 19 Sep 2023 19:04:35 -0500 Subject: [PATCH 051/182] convert ndarray to list --- element_facemap/facial_pose_model.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/element_facemap/facial_pose_model.py b/element_facemap/facial_pose_model.py index 1e564ae..3631990 100644 --- a/element_facemap/facial_pose_model.py +++ b/element_facemap/facial_pose_model.py @@ -375,9 +375,9 @@ def get_trajectory(cls, key: dict, body_parts: list = "all") -> pd.DataFrame: & {"recording_id": key["recording_id"]} & {"session_id": key["session_id"]} ).fetch("x_pos", "y_pos", "likelihood", as_dict=True)[0] - x_pos = result_dict["x_pos"] - y_pos = result_dict["y_pos"] - likelihood = result_dict["likelihood"] + x_pos = result_dict["x_pos"].to_list() + y_pos = result_dict["y_pos"].to_list() + likelihood = result_dict["likelihood"].to_list() a = np.vstack((x_pos, y_pos, likelihood)) a = a.T pdindex = pd.MultiIndex.from_product( From 33b1e0c11426a89c69073ed85491b2fa1463cd4b Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Tue, 19 Sep 2023 19:06:18 -0500 Subject: [PATCH 052/182] fix typo --- element_facemap/facial_pose_model.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/element_facemap/facial_pose_model.py b/element_facemap/facial_pose_model.py index 3631990..d10d334 100644 --- a/element_facemap/facial_pose_model.py +++ b/element_facemap/facial_pose_model.py @@ -375,9 +375,9 @@ def get_trajectory(cls, key: dict, body_parts: list = "all") -> pd.DataFrame: & {"recording_id": key["recording_id"]} & {"session_id": key["session_id"]} ).fetch("x_pos", "y_pos", "likelihood", as_dict=True)[0] - x_pos = result_dict["x_pos"].to_list() - y_pos = result_dict["y_pos"].to_list() - likelihood = result_dict["likelihood"].to_list() + x_pos = result_dict["x_pos"].tolist() + y_pos = 
result_dict["y_pos"].tolist() + likelihood = result_dict["likelihood"].tolist() a = np.vstack((x_pos, y_pos, likelihood)) a = a.T pdindex = pd.MultiIndex.from_product( From e2e4f09fc38f5a6053991f528788c8e23d3ec8ff Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Tue, 19 Sep 2023 19:11:48 -0500 Subject: [PATCH 053/182] fetch to fetch1 --- element_facemap/facial_pose_model.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/element_facemap/facial_pose_model.py b/element_facemap/facial_pose_model.py index d10d334..3d14177 100644 --- a/element_facemap/facial_pose_model.py +++ b/element_facemap/facial_pose_model.py @@ -360,7 +360,7 @@ def get_trajectory(cls, key: dict, body_parts: list = "all") -> pd.DataFrame: and x/y coordinates of each joint name for a camera_id, similar to output of DLC dataframe. If 2D, z is set of zeros """ - model_name = (FacemapModel & f'model_id={key["model_id"]}').fetch("model_name") + model_name = (FacemapModel & f'model_id={key["model_id"]}').fetch1("model_name") if body_parts == "all": body_parts = (cls.BodyPartPosition & key).fetch("body_part") From 77648f7e57a87ed09cdb5d1d7d5a2156a4bf3cd5 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Wed, 20 Sep 2023 10:24:17 -0500 Subject: [PATCH 054/182] rename paramset_key to model_id --- element_facemap/facial_pose_model.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/element_facemap/facial_pose_model.py b/element_facemap/facial_pose_model.py index 3d14177..573ad19 100644 --- a/element_facemap/facial_pose_model.py +++ b/element_facemap/facial_pose_model.py @@ -178,10 +178,10 @@ def infer_output_dir(self, key, relative=True, mkdir=True): video_dir = find_full_path(fbe.get_facemap_root_data_dir(), video_file).parent root_dir = find_root_directory(fbe.get_facemap_root_data_dir(), video_dir) - paramset_key = (FacemapPoseEstimationTask & key).fetch1("model_id") + model_id = (FacemapPoseEstimationTask & key).fetch1("model_id") processed_dir = Path(fbe.get_facemap_processed_data_dir()) output_dir = ( - processed_dir / video_dir.relative_to(root_dir) / f"facemap_{paramset_key}" + processed_dir / video_dir.relative_to(root_dir) / f"facemap_{model_id}" ) if mkdir: From dda90813167b98b09fc7313a5f234719d6e095a7 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Wed, 20 Sep 2023 12:05:38 -0500 Subject: [PATCH 055/182] add bbox optional input param --- element_facemap/facial_pose_model.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/element_facemap/facial_pose_model.py b/element_facemap/facial_pose_model.py index 573ad19..44a96c6 100644 --- a/element_facemap/facial_pose_model.py +++ b/element_facemap/facial_pose_model.py @@ -278,8 +278,6 @@ def make(self, key): self.BodyPartPosition.insert(body_part_position_entry) return - # think about file writing to inbox issue - bbox = (FacemapPoseEstimationTask & key).fetch1("bbox") facemap_video_root_data_dir = Path(video_files[0]).parent @@ -303,6 +301,8 @@ def make(self, key): pose = facemap_pose.Pose( filenames=[video_files], model_name=facemap_model_path.stem, + bbox=bbox, + bbox_set=bool(bbox), ) pose.run() @@ -326,6 +326,7 @@ def make(self, key): total_frame_count, creation_time, ) = _load_facemap_results(key, facemap_result_path, full_metadata_path) + elif task_mode == "load": if ( facemap_result_path.exists() & full_metadata_path.exists() @@ -356,9 +357,9 @@ def get_trajectory(cls, key: dict, body_parts: list = "all") -> pd.DataFrame: body_parts (list, optional): Body parts as a list. 
If "all", all joints Returns: - df: multi index pandas dataframe with DLC scorer names, body_parts - and x/y coordinates of each joint name for a camera_id, similar to - output of DLC dataframe. If 2D, z is set of zeros + df: multi index pandas dataframe with Facemap model name, body_parts + and x/y coordinates of each body part for a camera_id, similar to + output of facemap inference data. """ model_name = (FacemapModel & f'model_id={key["model_id"]}').fetch1("model_name") @@ -396,7 +397,7 @@ def _load_facemap_results(key, facemap_result_path, full_metadata_path): metadata = pickle.load(f) keypoints_data = utils.load_keypoints(metadata["bodyparts"], facemap_result_path) - # facemap_result is a 3D nested array with D1 - (x,y likelihood) D2 - bodyparts D3 - frame count + # facemap inferene result is a 3D nested array with D1 - (x,y likelihood), D2 - bodyparts, D3 - frame count # body parts are ordered the same way as stored pose_x_coord = keypoints_data[0, :, :] # (bodyparts, frames) From fc7eb1d649d2bb5e021273dc37b60ccb9da6211a Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Wed, 20 Sep 2023 14:25:21 -0500 Subject: [PATCH 056/182] specify facemap_model_schema_name to be a string --- element_facemap/facial_pose_model.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/element_facemap/facial_pose_model.py b/element_facemap/facial_pose_model.py index 44a96c6..3e7a107 100644 --- a/element_facemap/facial_pose_model.py +++ b/element_facemap/facial_pose_model.py @@ -23,7 +23,7 @@ def activate( - facemap_model_schema_name, + facemap_model_schema_name: str, fbe_schema_name=None, *, create_schema=True, From 22add729be3a46d849603c364f1ef35e6290cc6b Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Wed, 20 Sep 2023 17:05:40 -0500 Subject: [PATCH 057/182] minor fixes --- element_facemap/facial_pose_model.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/element_facemap/facial_pose_model.py b/element_facemap/facial_pose_model.py index 3e7a107..77bee10 100644 --- a/element_facemap/facial_pose_model.py +++ b/element_facemap/facial_pose_model.py @@ -47,7 +47,7 @@ def activate( Upstream tables: + Session: A parent table to VideoRecording, identifying a recording session + Equipment: A parent table to VideoRecording, identifying video recording equipment - + VideoRecording: A parent table to FacemapInferenceTask, identifying videos to be used in inference + + VideoRecording: A parent table to FacemapPoseEstimationTask, identifying videos to be used in inference Functions: + get_facemap_root_data_dir() -> list Retrieves the root data directory(s) with face recordings for all @@ -150,7 +150,7 @@ class File(dj.Part): @schema class FacemapPoseEstimationTask(dj.Manual): - """Staging table for pairing of recording and Facemap parameters before processing. + """Staging table for pairing of video recordings and Facemap parameters before processing. Attributes: fbe.VideoRecording (foreign key) : Primary key for VideoRecording table. 
@@ -169,7 +169,7 @@ class FacemapPoseEstimationTask(dj.Manual): --- pose_estimation_output_dir='' : varchar(255) # output dir - stores results of Facemap Pose estimation analysis task_mode='trigger' : enum('load', 'trigger') - bbox=null : longblob # list containing bounding box for cropping the video [x1, x2, y1, y2] + bbox=[] : longblob # list containing bounding box for cropping the video [x1, x2, y1, y2] task_description='' : varchar(128) """ @@ -289,8 +289,7 @@ def make(self, key): "model_file" ) - working_dir = Path.cwd() - facemap_model_path = working_dir / facemap_model_name + facemap_model_path = Path.cwd() / facemap_model_name models_root_dir = model_loader.get_models_dir() # copy this model file to the facemap model root directory (~/.facemap/models/) From e7860c3b550d2f0ab78dd0b57b99024bfe9665dc Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Wed, 20 Sep 2023 17:29:13 -0500 Subject: [PATCH 058/182] add facemap pose estimation -- run inference notebook --- notebooks/run_inference.ipynb | 382 ++++++++++++++++++++++++++++++++++ 1 file changed, 382 insertions(+) create mode 100644 notebooks/run_inference.ipynb diff --git a/notebooks/run_inference.ipynb b/notebooks/run_inference.ipynb new file mode 100644 index 0000000..596696a --- /dev/null +++ b/notebooks/run_inference.ipynb @@ -0,0 +1,382 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Facemap Pose Estimation -- Run Inference Notebook" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import datetime\n", + "import datajoint as dj\n", + "import os\n", + "\n", + "# change to the upper level folder to detect dj_local_conf.json\n", + "if os.path.basename(os.getcwd()) == \"notebooks\":\n", + " os.chdir(\"..\")\n", + "dj.config.load('dj_local_conf.json')\n", + "\n", + "from workflow.pipeline import *\n", + "from workflow.utils.ingest import ingest_model, generate_facemap_pose_estimation_task" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### Insert Subject and Session into subject.Subject, session.Session and session.SessionDirectory tables" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "sub_insert = dict(subject=\"mdl_sub\", \n", + " subject_nickname=\"facemap model subject\", \n", + " sex='U', \n", + " subject_birth_date=datetime.datetime.now(), \n", + " subject_description=\"Subject for Facemap Model Inference testing\")\n", + "# subject.Subject.insert1(sub_insert)\n", + "subject_key = (subject.Subject & 'subject=\"mdl_sub\"').fetch1(\"KEY\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "session_id = 2\n", + "session_insert = dict(subject_key, session_id, session_datetime=datetime.datetime.now())\n", + "sdir_insert = dict(subject_key, session_id, session_dir=\"20230627_Image_eCBsensor_activity/Behavior_20230627/C57-C11-3_Rm_CNO_30min\")\n", + "\n", + "session.Session.insert1(session_insert)\n", + "session.SessionDirectory.insert1(sdir_insert)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Display Session Table to validate insert\n", + "session.Session() & {**subject_key, 'session_id': session_id}" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Display SessionDirectory Table to validate insert\n", + 
"session.SessionDirectory() & {**subject_key, 'session_id': session_id}" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Ingest locally stored pytorch model(.pt) file\n", + "Provide model name, model filepath, and optional model description" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "model_name = 'facemap_model_state.pt'\n", + "full_local_model_filepath = \"/Users/sidhulyalkar/.facemap/models/facemap_model_state.pt\"\n", + "ingest_model(model_name, model_description=\"test facemap model\", model_file=full_local_model_filepath)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Generate a Pose Estimation Task" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "model_id = 0\n", + "session_key = session.Session.fetch(\"KEY\")[2] \n", + "generate_facemap_pose_estimation_task(model_id, session_key, task_mode=\"trigger\", bbox=[])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### Display FacemapPoseEstimationTask table" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "facemap_pose.FacemapPoseEstimationTask()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### Display VideoRecording and VideoRecording.File tables from the imported facial behavioral estimation (fbe) schema" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "fbe.VideoRecording()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "fbe.VideoRecording.File()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### Run Pose Estimation on all unprocessed FacemapPoseEstimationTasks " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "facemap_pose.FacemapPoseEstimation.populate(display_progress=True)\n", + "# If a lost connection error occurs, rerun the populate and if processing \n", + "# has completed, the data will be loaded and inference will not be rerun. 
\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### Display Facemap Pose Estimation Tables" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "facemap_pose.FacemapPoseEstimation()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "facemap_pose.FacemapPoseEstimation.BodyPartPosition()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### Visualize Pose Estimation Output" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "pe_query = {**session_key, 'recording_id': 0, 'model_id': model_id}\n", + "pose_estimation_key = (facemap_pose.FacemapPoseEstimation & pe_query).fetch1(\"KEY\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Get Trajectory of X and Y coordinates" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Specify all body parts, or set body_parts to a custom list\n", + "body_parts = \"all\"\n", + "model_name = (facemap_pose.FacemapModel & f'model_id={key[\"model_id\"]}').fetch1(\"model_name\")\n", + "\n", + "if body_parts == \"all\":\n", + " body_parts = (facemap_pose.BodyPartPosition & key).fetch(\"body_part\")\n", + "elif not isinstance(body_parts, list):\n", + " body_parts = list(body_parts)\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Construct Pandas MultiIndex DataFrame\n", + "df = None\n", + "for body_part in body_parts:\n", + " result_dict = (\n", + " facemap_pose.BodyPartPosition\n", + " & {\"body_part\": body_part}\n", + " & {\"recording_id\": key[\"recording_id\"]}\n", + " & {\"session_id\": key[\"session_id\"]}\n", + " ).fetch(\"x_pos\", \"y_pos\", \"likelihood\", as_dict=True)[0]\n", + " x_pos = result_dict[\"x_pos\"].tolist()\n", + " y_pos = result_dict[\"y_pos\"].tolist()\n", + " likelihood = result_dict[\"likelihood\"].tolist()\n", + " a = np.vstack((x_pos, y_pos, likelihood))\n", + " a = a.T\n", + " pdindex = pd.MultiIndex.from_product(\n", + " [[model_name], [body_part], [\"x\", \"y\", \"likelihood\"]],\n", + " names=[\"model\", \"bodyparts\", \"coords\"],\n", + " )\n", + " frame = pd.DataFrame(a, columns=pdindex, index=range(0, a.shape[0]))\n", + " df = pd.concat([df, frame], axis=1)\n", + "df" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Or can use the built in function get_trajectory which also constructs this Pandas MultiIndex DataFrame\n", + "# df=facemap_pose.FacemapPoseEstimation.get_trajectory(pose_estimation_key)\n", + "# df" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "df_xy = df.iloc[:,df.columns.get_level_values(2).isin([\"x\",\"y\"])]['facemap_model_state.pt']\n", + "df_xy.mean()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Plot coordinates across time for each body part" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "df_xy.plot().legend(loc='best', prop={'size': 5})" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "df_flat = df_xy.copy()\n", + "df_flat.columns = df_flat.columns.map('_'.join)" + ] + }, + { + "cell_type": "markdown", + 
"metadata": {}, + "source": [ + "Plot Trace Overlays of each body part across time" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import matplotlib.pyplot as plt \n", + "\n", + "fig,ax=plt.subplots(2,2)\n", + "fig.set_figwidth(20)\n", + "fig.set_figheight(15)\n", + "\n", + "df_flat.plot(x='eye(front)_x',y='eye(front)_y',ax=ax[0, 0])\n", + "df_flat.plot(x='eye(back)_x',y='eye(back)_y',ax=ax[0, 0])\n", + "df_flat.plot(x='eye(bottom)_x',y='eye(bottom)_y',ax=ax[0, 0])\n", + "\n", + "df_flat.plot(x='nose(tip)_x',y='nose(tip)_y', ax=ax[1, 0])\n", + "df_flat.plot(x='nose(bottom)_x',y='nose(bottom)_y', ax=ax[1, 0])\n", + "df_flat.plot(x='nose(r)_x',y='nose(r)_y', ax=ax[1, 0])\n", + "df_flat.plot(x='nosebridge_x',y='nosebridge_y', ax=ax[1, 0])\n", + "\n", + "df_flat.plot(x='mouth_x',y='mouth_y', ax=ax[0, 1])\n", + "df_flat.plot(x='lowerlip_x',y='lowerlip_y', ax=ax[0, 1])\n", + "df_flat.plot(x='paw_x',y='paw_y', ax=ax[0, 1])\n", + "\n", + "df_flat.plot(x='whisker(I)_x',y='whisker(I)_y', ax=ax[1, 1])\n", + "df_flat.plot(x='whisker(II)_x',y='whisker(II)_y', ax=ax[1, 1])\n", + "df_flat.plot(x='whisker(II)_x',y='whisker(II)_y', ax=ax[1, 1])\n" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.17" + }, + "orig_nbformat": 4 + }, + "nbformat": 4, + "nbformat_minor": 2 +} From ac7e541c8ce0ca021e36be08de53927a9ca98c9c Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Thu, 21 Sep 2023 11:48:19 -0500 Subject: [PATCH 059/182] modify activate parameter outputs --- element_facemap/facial_pose_model.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/element_facemap/facial_pose_model.py b/element_facemap/facial_pose_model.py index 77bee10..ea91c30 100644 --- a/element_facemap/facial_pose_model.py +++ b/element_facemap/facial_pose_model.py @@ -24,11 +24,11 @@ def activate( facemap_model_schema_name: str, - fbe_schema_name=None, + fbe_schema_name: str = None, *, - create_schema=True, - create_tables=True, - linking_module=None, + create_schema: bool = True, + create_tables: bool = True, + linking_module: str = None, ): """Activate schema. 
From c7771a88b0ed15c39a4d76e0d45929383aff7961 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Thu, 21 Sep 2023 13:10:07 -0500 Subject: [PATCH 060/182] remove notebook for now --- notebooks/run_inference.ipynb | 382 ---------------------------------- 1 file changed, 382 deletions(-) delete mode 100644 notebooks/run_inference.ipynb diff --git a/notebooks/run_inference.ipynb b/notebooks/run_inference.ipynb deleted file mode 100644 index 596696a..0000000 --- a/notebooks/run_inference.ipynb +++ /dev/null @@ -1,382 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Facemap Pose Estimation -- Run Inference Notebook" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import datetime\n", - "import datajoint as dj\n", - "import os\n", - "\n", - "# change to the upper level folder to detect dj_local_conf.json\n", - "if os.path.basename(os.getcwd()) == \"notebooks\":\n", - " os.chdir(\"..\")\n", - "dj.config.load('dj_local_conf.json')\n", - "\n", - "from workflow.pipeline import *\n", - "from workflow.utils.ingest import ingest_model, generate_facemap_pose_estimation_task" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "#### Insert Subject and Session into subject.Subject, session.Session and session.SessionDirectory tables" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "sub_insert = dict(subject=\"mdl_sub\", \n", - " subject_nickname=\"facemap model subject\", \n", - " sex='U', \n", - " subject_birth_date=datetime.datetime.now(), \n", - " subject_description=\"Subject for Facemap Model Inference testing\")\n", - "# subject.Subject.insert1(sub_insert)\n", - "subject_key = (subject.Subject & 'subject=\"mdl_sub\"').fetch1(\"KEY\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "session_id = 2\n", - "session_insert = dict(subject_key, session_id, session_datetime=datetime.datetime.now())\n", - "sdir_insert = dict(subject_key, session_id, session_dir=\"20230627_Image_eCBsensor_activity/Behavior_20230627/C57-C11-3_Rm_CNO_30min\")\n", - "\n", - "session.Session.insert1(session_insert)\n", - "session.SessionDirectory.insert1(sdir_insert)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Display Session Table to validate insert\n", - "session.Session() & {**subject_key, 'session_id': session_id}" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Display SessionDirectory Table to validate insert\n", - "session.SessionDirectory() & {**subject_key, 'session_id': session_id}" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Ingest locally stored pytorch model(.pt) file\n", - "Provide model name, model filepath, and optional model description" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "model_name = 'facemap_model_state.pt'\n", - "full_local_model_filepath = \"/Users/sidhulyalkar/.facemap/models/facemap_model_state.pt\"\n", - "ingest_model(model_name, model_description=\"test facemap model\", model_file=full_local_model_filepath)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Generate a Pose Estimation Task" - ] - }, - { - "cell_type": "code", - "execution_count": null, - 
"metadata": {}, - "outputs": [], - "source": [ - "model_id = 0\n", - "session_key = session.Session.fetch(\"KEY\")[2] \n", - "generate_facemap_pose_estimation_task(model_id, session_key, task_mode=\"trigger\", bbox=[])" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "#### Display FacemapPoseEstimationTask table" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "facemap_pose.FacemapPoseEstimationTask()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "#### Display VideoRecording and VideoRecording.File tables from the imported facial behavioral estimation (fbe) schema" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "fbe.VideoRecording()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "fbe.VideoRecording.File()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "#### Run Pose Estimation on all unprocessed FacemapPoseEstimationTasks " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "facemap_pose.FacemapPoseEstimation.populate(display_progress=True)\n", - "# If a lost connection error occurs, rerun the populate and if processing \n", - "# has completed, the data will be loaded and inference will not be rerun. \n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "#### Display Facemap Pose Estimation Tables" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "facemap_pose.FacemapPoseEstimation()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "facemap_pose.FacemapPoseEstimation.BodyPartPosition()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "#### Visualize Pose Estimation Output" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "pe_query = {**session_key, 'recording_id': 0, 'model_id': model_id}\n", - "pose_estimation_key = (facemap_pose.FacemapPoseEstimation & pe_query).fetch1(\"KEY\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Get Trajectory of X and Y coordinates" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Specify all body parts, or set body_parts to a custom list\n", - "body_parts = \"all\"\n", - "model_name = (facemap_pose.FacemapModel & f'model_id={key[\"model_id\"]}').fetch1(\"model_name\")\n", - "\n", - "if body_parts == \"all\":\n", - " body_parts = (facemap_pose.BodyPartPosition & key).fetch(\"body_part\")\n", - "elif not isinstance(body_parts, list):\n", - " body_parts = list(body_parts)\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Construct Pandas MultiIndex DataFrame\n", - "df = None\n", - "for body_part in body_parts:\n", - " result_dict = (\n", - " facemap_pose.BodyPartPosition\n", - " & {\"body_part\": body_part}\n", - " & {\"recording_id\": key[\"recording_id\"]}\n", - " & {\"session_id\": key[\"session_id\"]}\n", - " ).fetch(\"x_pos\", \"y_pos\", \"likelihood\", as_dict=True)[0]\n", - " x_pos = result_dict[\"x_pos\"].tolist()\n", - " y_pos = result_dict[\"y_pos\"].tolist()\n", - " likelihood = 
result_dict[\"likelihood\"].tolist()\n", - " a = np.vstack((x_pos, y_pos, likelihood))\n", - " a = a.T\n", - " pdindex = pd.MultiIndex.from_product(\n", - " [[model_name], [body_part], [\"x\", \"y\", \"likelihood\"]],\n", - " names=[\"model\", \"bodyparts\", \"coords\"],\n", - " )\n", - " frame = pd.DataFrame(a, columns=pdindex, index=range(0, a.shape[0]))\n", - " df = pd.concat([df, frame], axis=1)\n", - "df" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Or can use the built in function get_trajectory which also constructs this Pandas MultiIndex DataFrame\n", - "# df=facemap_pose.FacemapPoseEstimation.get_trajectory(pose_estimation_key)\n", - "# df" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "df_xy = df.iloc[:,df.columns.get_level_values(2).isin([\"x\",\"y\"])]['facemap_model_state.pt']\n", - "df_xy.mean()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Plot coordinates across time for each body part" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "df_xy.plot().legend(loc='best', prop={'size': 5})" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "df_flat = df_xy.copy()\n", - "df_flat.columns = df_flat.columns.map('_'.join)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Plot Trace Overlays of each body part across time" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import matplotlib.pyplot as plt \n", - "\n", - "fig,ax=plt.subplots(2,2)\n", - "fig.set_figwidth(20)\n", - "fig.set_figheight(15)\n", - "\n", - "df_flat.plot(x='eye(front)_x',y='eye(front)_y',ax=ax[0, 0])\n", - "df_flat.plot(x='eye(back)_x',y='eye(back)_y',ax=ax[0, 0])\n", - "df_flat.plot(x='eye(bottom)_x',y='eye(bottom)_y',ax=ax[0, 0])\n", - "\n", - "df_flat.plot(x='nose(tip)_x',y='nose(tip)_y', ax=ax[1, 0])\n", - "df_flat.plot(x='nose(bottom)_x',y='nose(bottom)_y', ax=ax[1, 0])\n", - "df_flat.plot(x='nose(r)_x',y='nose(r)_y', ax=ax[1, 0])\n", - "df_flat.plot(x='nosebridge_x',y='nosebridge_y', ax=ax[1, 0])\n", - "\n", - "df_flat.plot(x='mouth_x',y='mouth_y', ax=ax[0, 1])\n", - "df_flat.plot(x='lowerlip_x',y='lowerlip_y', ax=ax[0, 1])\n", - "df_flat.plot(x='paw_x',y='paw_y', ax=ax[0, 1])\n", - "\n", - "df_flat.plot(x='whisker(I)_x',y='whisker(I)_y', ax=ax[1, 1])\n", - "df_flat.plot(x='whisker(II)_x',y='whisker(II)_y', ax=ax[1, 1])\n", - "df_flat.plot(x='whisker(II)_x',y='whisker(II)_y', ax=ax[1, 1])\n" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.17" - }, - "orig_nbformat": 4 - }, - "nbformat": 4, - "nbformat_minor": 2 -} From 9d9f696d9c192833ae52e1db968400ca4cbfcc2b Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Thu, 21 Sep 2023 13:16:36 -0500 Subject: [PATCH 061/182] remove unused imports --- element_facemap/facial_pose_model.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/element_facemap/facial_pose_model.py b/element_facemap/facial_pose_model.py index ea91c30..96e541a 100644 --- 
a/element_facemap/facial_pose_model.py +++ b/element_facemap/facial_pose_model.py @@ -4,15 +4,12 @@ from glob import glob from pathlib import Path from typing import List, Tuple -import cv2 import datajoint as dj import numpy as np import pandas as pd from element_interface.utils import find_full_path, find_root_directory -import torch import os -import h5py import pickle import shutil from . import facial_behavior_estimation as fbe From d03320a8373a46af3546b22673ed99bbeb024d18 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Mon, 25 Sep 2023 17:17:58 -0500 Subject: [PATCH 062/182] add insert_model classfunction --- element_facemap/facial_pose_model.py | 63 ++++++++++++++++++++++++++-- 1 file changed, 59 insertions(+), 4 deletions(-) diff --git a/element_facemap/facial_pose_model.py b/element_facemap/facial_pose_model.py index 96e541a..2f12110 100644 --- a/element_facemap/facial_pose_model.py +++ b/element_facemap/facial_pose_model.py @@ -100,14 +100,31 @@ class BodyPart(dj.Lookup): --- body_part_description='' : varchar(1000) """ - - + + contents = [ # Facemap Default BodyPart list + "eye(back)", + "eye(bottom)", + "eye(front)", + "eye(top)", + "lowerlip", + "mouth", + "nose(bottom)", + "nose(r)", + "nose(tip)", + "nose(top)", + "nosebridge", + "paw", + "whisker(I)", + "whisker(III)", + "whisker(II)", + ] + @schema class FacemapModel(dj.Manual): """Trained Models stored for facial pose inference Attributes: - model_id(int) : File identification number, located in filename + model_id(int) : User specified ID associated with a unique model model_name( varchar(64) ): Name of model, filepath.stem """ @@ -143,7 +160,40 @@ class File(dj.Part): --- model_file: attach # model file attachment """ + @classmethod + def insert_new_model(cls, model_id: int, model_name: str, model_description: str, full_model_path: str): + facemap_model_insert = dict( + model_id=model_id, model_name=model_name, model_description=model_description + ) + FacemapModel.insert1(facemap_model_insert) + + body_part_insert = [] + body_parts = [ + "eye(back)", + "eye(bottom)", + "eye(front)", + "eye(top)", + "lowerlip", + "mouth", + "nose(bottom)", + "nose(r)", + "nose(tip)", + "nose(top)", + "nosebridge", + "paw", + "whisker(I)", + "whisker(III)", + "whisker(II)", + ] + for bp in body_parts: + body_part_insert.append(dict(model_id=model_id, body_part=bp)) + # Insert into parent BodyPart table if no entries are present + if len(cls.BodyPart()) == 0: + cls.BodyPart.insert(body_part_insert) + file_insert = dict(model_id=model_id, model_file=full_model_path) + cls.BodyPart.insert(body_part_insert) + cls.File.insert1(file_insert) @schema class FacemapPoseEstimationTask(dj.Manual): @@ -169,7 +219,8 @@ class FacemapPoseEstimationTask(dj.Manual): bbox=[] : longblob # list containing bounding box for cropping the video [x1, x2, y1, y2] task_description='' : varchar(128) """ - + + @classmethod def infer_output_dir(self, key, relative=True, mkdir=True): video_file = (fbe.VideoRecording.File & key).fetch("file_path", limit=1)[0] video_dir = find_full_path(fbe.get_facemap_root_data_dir(), video_file).parent @@ -245,6 +296,10 @@ def make(self, key): facemap_result_path = output_dir / f"{vid_name}_FacemapPose.h5" full_metadata_path = output_dir / f"{vid_name}_FacemapPose_metadata.pkl" + # Create Symbolic Link to raw video data files from outbox location + for video_file in video_files: + video_symlink = (video_file) + # Trigger Facemap Pose Estimation Inference if task_mode == "trigger": # Triggering facemap for pose estimation 
requires: From 1d765f756eba8e2a274fd3b0b34a4d706735e32b Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Tue, 26 Sep 2023 14:00:33 -0500 Subject: [PATCH 063/182] add symlinks for video files to output dir --- element_facemap/facial_pose_model.py | 35 ++++++++++------------------ 1 file changed, 12 insertions(+), 23 deletions(-) diff --git a/element_facemap/facial_pose_model.py b/element_facemap/facial_pose_model.py index 2f12110..0a28724 100644 --- a/element_facemap/facial_pose_model.py +++ b/element_facemap/facial_pose_model.py @@ -101,7 +101,8 @@ class BodyPart(dj.Lookup): body_part_description='' : varchar(1000) """ - contents = [ # Facemap Default BodyPart list + # Facemap Default BodyPart list + contents = [ "eye(back)", "eye(bottom)", "eye(front)", @@ -168,25 +169,9 @@ def insert_new_model(cls, model_id: int, model_name: str, model_description: str FacemapModel.insert1(facemap_model_insert) body_part_insert = [] - body_parts = [ - "eye(back)", - "eye(bottom)", - "eye(front)", - "eye(top)", - "lowerlip", - "mouth", - "nose(bottom)", - "nose(r)", - "nose(tip)", - "nose(top)", - "nosebridge", - "paw", - "whisker(I)", - "whisker(III)", - "whisker(II)", - ] - for bp in body_parts: + for bp in BodyPart.contents: body_part_insert.append(dict(model_id=model_id, body_part=bp)) + # Insert into parent BodyPart table if no entries are present if len(cls.BodyPart()) == 0: cls.BodyPart.insert(body_part_insert) @@ -219,7 +204,7 @@ class FacemapPoseEstimationTask(dj.Manual): bbox=[] : longblob # list containing bounding box for cropping the video [x1, x2, y1, y2] task_description='' : varchar(128) """ - + @classmethod def infer_output_dir(self, key, relative=True, mkdir=True): video_file = (fbe.VideoRecording.File & key).fetch("file_path", limit=1)[0] @@ -297,9 +282,13 @@ def make(self, key): full_metadata_path = output_dir / f"{vid_name}_FacemapPose_metadata.pkl" # Create Symbolic Link to raw video data files from outbox location + video_symlinks = [] for video_file in video_files: - video_symlink = (video_file) - + video_symlink = output_dir / video_file.name + if video_symlink.exists(): + video_symlink.unlink() + video_symlink.symlink_to(video_file) + video_symlinks.append(video_symlink) # Trigger Facemap Pose Estimation Inference if task_mode == "trigger": # Triggering facemap for pose estimation requires: @@ -350,7 +339,7 @@ def make(self, key): # Instantiate Pose object, with filenames specified as video files, and bounding specified in params # Assumes GUI to be none as we are running CLI implementation pose = facemap_pose.Pose( - filenames=[video_files], + filenames=[video_symlinks], model_name=facemap_model_path.stem, bbox=bbox, bbox_set=bool(bbox), From 71a60638d268f6a075d4f3ef620d190097632a21 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Tue, 26 Sep 2023 14:09:54 -0500 Subject: [PATCH 064/182] remove output results copy --- element_facemap/facial_pose_model.py | 30 +++++++--------------------- 1 file changed, 7 insertions(+), 23 deletions(-) diff --git a/element_facemap/facial_pose_model.py b/element_facemap/facial_pose_model.py index 0a28724..3cc8de3 100644 --- a/element_facemap/facial_pose_model.py +++ b/element_facemap/facial_pose_model.py @@ -201,7 +201,7 @@ class FacemapPoseEstimationTask(dj.Manual): --- pose_estimation_output_dir='' : varchar(255) # output dir - stores results of Facemap Pose estimation analysis task_mode='trigger' : enum('load', 'trigger') - bbox=[] : longblob # list containing bounding box for cropping the video [x1, x2, y1, y2] + bbox=None : 
longblob # list containing bounding box for cropping the video [x1, x2, y1, y2] task_description='' : varchar(128) """ @@ -281,14 +281,15 @@ def make(self, key): facemap_result_path = output_dir / f"{vid_name}_FacemapPose.h5" full_metadata_path = output_dir / f"{vid_name}_FacemapPose_metadata.pkl" - # Create Symbolic Link to raw video data files from outbox location + # Create Symbolic Links to raw video data files from outbox directory video_symlinks = [] for video_file in video_files: video_symlink = output_dir / video_file.name if video_symlink.exists(): video_symlink.unlink() video_symlink.symlink_to(video_file) - video_symlinks.append(video_symlink) + video_symlinks.append(video_symlink) + # Trigger Facemap Pose Estimation Inference if task_mode == "trigger": # Triggering facemap for pose estimation requires: @@ -320,8 +321,6 @@ def make(self, key): return bbox = (FacemapPoseEstimationTask & key).fetch1("bbox") - facemap_video_root_data_dir = Path(video_files[0]).parent - # Model Name of interest should be specified by user during facemap task params manual update model_id = (FacemapPoseEstimationTask & key).fetch("model_id") @@ -346,20 +345,6 @@ def make(self, key): ) pose.run() - # expect single .h5 model and .pkl metadata output in same directory that videos are stored - facemap_result_path = next( - facemap_video_root_data_dir.glob(f"*{vid_name}_FacemapPose.h5") - ) - full_metadata_path = next( - facemap_video_root_data_dir.glob( - f"*{vid_name}_FacemapPose_metadata.pkl" - ) - ) - - # copy local facemap output to output directory - shutil.copy(facemap_result_path, output_dir) - shutil.copy(full_metadata_path, output_dir) - ( body_part_position_entry, inference_duration, @@ -432,14 +417,13 @@ def get_trajectory(cls, key: dict, body_parts: list = "all") -> pd.DataFrame: def _load_facemap_results(key, facemap_result_path, full_metadata_path): from facemap import utils - + with open(full_metadata_path, "rb") as f: metadata = pickle.load(f) - keypoints_data = utils.load_keypoints(metadata["bodyparts"], facemap_result_path) - # facemap inferene result is a 3D nested array with D1 - (x,y likelihood), D2 - bodyparts, D3 - frame count + + # Facemap inference result is a 3D nested array with D1 - (x,y likelihood), D2 - bodyparts, D3 - frame count # body parts are ordered the same way as stored - pose_x_coord = keypoints_data[0, :, :] # (bodyparts, frames) pose_y_coord = keypoints_data[1, :, :] # (bodyparts, frames) pose_likelihood = keypoints_data[2, :, :] # (bodyparts, frames) From 7d47a99f271cf79ba78891939cf0dc03296884fe Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Tue, 26 Sep 2023 18:22:09 -0500 Subject: [PATCH 065/182] add class method for pose estimation task generation --- element_facemap/facial_pose_model.py | 35 ++++++++++++++++++++++++++-- 1 file changed, 33 insertions(+), 2 deletions(-) diff --git a/element_facemap/facial_pose_model.py b/element_facemap/facial_pose_model.py index 3cc8de3..6ea17cd 100644 --- a/element_facemap/facial_pose_model.py +++ b/element_facemap/facial_pose_model.py @@ -206,7 +206,7 @@ class FacemapPoseEstimationTask(dj.Manual): """ @classmethod - def infer_output_dir(self, key, relative=True, mkdir=True): + def infer_output_dir(cls, key, relative=True, mkdir=True): video_file = (fbe.VideoRecording.File & key).fetch("file_path", limit=1)[0] video_dir = find_full_path(fbe.get_facemap_root_data_dir(), video_file).parent root_dir = find_root_directory(fbe.get_facemap_root_data_dir(), video_dir) @@ -222,7 +222,38 @@ def infer_output_dir(self, key, 
relative=True, mkdir=True): return output_dir.relative_to(processed_dir) if relative else output_dir + + @classmethod + def generate(cls, key, model_id: int, relative_video_paths: list, task_mode: str = "trigger", bbox: list = []): + """Insert video/videos into fbe.VideoRecording table and generate a unique pose estimation task for each of the relative_video_paths + Args: + model_id (int): User Specified model identification number + session_key (dict): + relative_video_paths (list): _description_ + task_mode (str, optional): _description_. Defaults to "trigger". + bbox (list, optional): _description_. Defaults to []. + """ + video_paths = [find_full_path(fbe.get_facemap_root_data_dir(), rpath) for rpath in relative_video_paths] + for vid_path in video_paths: + device_id = (fbe.VideoRecording & key).fetch('device_id') + vrec_key = (fbe.VideoRecording & key).fetch('key') + + model_key = (FacemapModel & f"model_id={model_id}").fetch1("KEY") + pose_estimation_output_dir = cls.infer_output_dir(vrec_key) + + facemap_pose_estimation_task_insert = { + **vrec_key, + **model_key, + "pose_estimation_output_dir": pose_estimation_output_dir, + "task_mode": task_mode, + "bbox": bbox, + } + cls.insert1( + facemap_pose_estimation_task_insert + ) + insert_pose_estimation_task = generate + @schema class FacemapPoseEstimation(dj.Computed): """Results of facemap pose estimation @@ -417,7 +448,7 @@ def get_trajectory(cls, key: dict, body_parts: list = "all") -> pd.DataFrame: def _load_facemap_results(key, facemap_result_path, full_metadata_path): from facemap import utils - + with open(full_metadata_path, "rb") as f: metadata = pickle.load(f) keypoints_data = utils.load_keypoints(metadata["bodyparts"], facemap_result_path) From b33be52cd96da01fb163f960cf76b5a5fc15a7a4 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Tue, 26 Sep 2023 18:54:31 -0500 Subject: [PATCH 066/182] update comments --- element_facemap/facial_pose_model.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/element_facemap/facial_pose_model.py b/element_facemap/facial_pose_model.py index 6ea17cd..09e3c18 100644 --- a/element_facemap/facial_pose_model.py +++ b/element_facemap/facial_pose_model.py @@ -225,7 +225,7 @@ def infer_output_dir(cls, key, relative=True, mkdir=True): @classmethod def generate(cls, key, model_id: int, relative_video_paths: list, task_mode: str = "trigger", bbox: list = []): - """Insert video/videos into fbe.VideoRecording table and generate a unique pose estimation task for each of the relative_video_paths + """Generate a unique pose estimation task for each of the relative_video_paths Args: model_id (int): User Specified model identification number From 4dad789c6bb967415abfa16c0b385dc5fde6b830 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Tue, 26 Sep 2023 18:55:26 -0500 Subject: [PATCH 067/182] add automated video recording insertion --- element_facemap/facial_behavior_estimation.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/element_facemap/facial_behavior_estimation.py b/element_facemap/facial_behavior_estimation.py index bce3907..1037a9f 100644 --- a/element_facemap/facial_behavior_estimation.py +++ b/element_facemap/facial_behavior_estimation.py @@ -141,7 +141,7 @@ class File(dj.Part): Attributes: master (foreign key) : Primary key for VideoRecording table. - file_id (smallint) : File ID. + file_id (smallint) : File ID. file_path ( varchar(255) ) : Filepath of video, relative to root directory. 
""" @@ -152,6 +152,9 @@ class File(dj.Part): file_path : varchar(255) # filepath of video, relative to root directory """ + @classmethod + def insert_video_recording(cls, key, relative_video_paths, recording_id, device_id): + video_recording_insert = {**key, 'recording_id': recording_id, 'device_id': device_id} @schema class RecordingInfo(dj.Imported): From 635275e5b1c6cc83f7252e1b112671402f2f7704 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Tue, 26 Sep 2023 19:25:24 -0500 Subject: [PATCH 068/182] Add bbox default if None --- element_facemap/facial_pose_model.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/element_facemap/facial_pose_model.py b/element_facemap/facial_pose_model.py index 09e3c18..03e3d34 100644 --- a/element_facemap/facial_pose_model.py +++ b/element_facemap/facial_pose_model.py @@ -351,7 +351,8 @@ def make(self, key): self.BodyPartPosition.insert(body_part_position_entry) return - bbox = (FacemapPoseEstimationTask & key).fetch1("bbox") + bbox = (FacemapPoseEstimationTask & key).fetch1("bbox") or [] + # Model Name of interest should be specified by user during facemap task params manual update model_id = (FacemapPoseEstimationTask & key).fetch("model_id") From 476afb42970a28dccba7c5258fff02310b725d62 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Tue, 26 Sep 2023 19:40:51 -0500 Subject: [PATCH 069/182] keep list of videos for run input as_posix --- element_facemap/facial_pose_model.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/element_facemap/facial_pose_model.py b/element_facemap/facial_pose_model.py index 03e3d34..c6566c8 100644 --- a/element_facemap/facial_pose_model.py +++ b/element_facemap/facial_pose_model.py @@ -305,7 +305,7 @@ def make(self, key): ) video_files = [ - find_full_path(fbe.get_facemap_root_data_dir(), video_file).as_posix() + find_full_path(fbe.get_facemap_root_data_dir(), video_file) for video_file in video_files ] vid_name = Path(video_files[0]).stem @@ -319,7 +319,7 @@ def make(self, key): if video_symlink.exists(): video_symlink.unlink() video_symlink.symlink_to(video_file) - video_symlinks.append(video_symlink) + video_symlinks.append(video_symlink.as_posix()) # Trigger Facemap Pose Estimation Inference if task_mode == "trigger": From 2e67480027c48cd3df572d23505c9d060f7ebac5 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Wed, 27 Sep 2023 11:35:02 -0500 Subject: [PATCH 070/182] remove video recording classmethod --- element_facemap/facial_behavior_estimation.py | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/element_facemap/facial_behavior_estimation.py b/element_facemap/facial_behavior_estimation.py index 1037a9f..50422e4 100644 --- a/element_facemap/facial_behavior_estimation.py +++ b/element_facemap/facial_behavior_estimation.py @@ -141,7 +141,7 @@ class File(dj.Part): Attributes: master (foreign key) : Primary key for VideoRecording table. - file_id (smallint) : File ID. + file_id (smallint) : File ID. file_path ( varchar(255) ) : Filepath of video, relative to root directory. 
""" @@ -152,9 +152,6 @@ class File(dj.Part): file_path : varchar(255) # filepath of video, relative to root directory """ - @classmethod - def insert_video_recording(cls, key, relative_video_paths, recording_id, device_id): - video_recording_insert = {**key, 'recording_id': recording_id, 'device_id': device_id} @schema class RecordingInfo(dj.Imported): @@ -526,4 +523,4 @@ def get_loader_result( loaded_dataset = np.load(output_file, allow_pickle=True).item() creation_time = datetime.fromtimestamp(Path(output_file).stat().st_ctime) - return loaded_dataset, creation_time + return loaded_dataset, creation_time \ No newline at end of file From 258985208df5ae18e95f1dcab63ab181eed69873 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Fri, 29 Sep 2023 19:05:14 -0500 Subject: [PATCH 071/182] add facial pose estimation training --- element_facemap/train_facial_model.py | 334 ++++++++++++++++++++++++++ 1 file changed, 334 insertions(+) create mode 100644 element_facemap/train_facial_model.py diff --git a/element_facemap/train_facial_model.py b/element_facemap/train_facial_model.py new file mode 100644 index 0000000..697aec4 --- /dev/null +++ b/element_facemap/train_facial_model.py @@ -0,0 +1,334 @@ +import datajoint as dj +import inspect +import importlib +import os +from pathlib import Path +from datetime import datetime +import numpy as np +from element_interface.utils import find_full_path, dict_to_uuid, find_root_directory + +from . import facial_behavior_estimation as fbe +from . import facial_pose_model as facemap_pose + + +schema = dj.schema() +_linking_module = None + + +def activate( + facemap_train_schema_name: str, + fbe_schema_name: str = None, + facemap_model_schema_name: str = None, + *, + create_schema: bool = True, + create_tables: bool = True, + linking_module: str = None, +): + """Activate this schema. + + Args: + facemap_train_schema_name (str): schema name on the database server to activate + the `facemap_train` schema of element-facemap + fbe_schema_name (str): Schema name on the database server to activate the 'facial_behavioral_estimation + facemap_model_schema_name (str): Schema name on the database server to activate the + `facemap_pose_model` schema of element-facemap + create_schema (bool): when True (default), create schema in the database if it + does not yet exist. + create_tables (bool): when True (default), create schema tables in the database + if they do not yet exist. + linking_module (str): a module (or name) containing the required dependencies. + + Dependencies: + Upstream tables: + + Session: A parent table to VideoRecording, identifying a recording session + + Equipment: A parent table to VideoRecording, identifying video recording equipment + + VideoRecording: A parent table to FacemapInferenceTask, identifying videos to be used in inference + Functions: + + get_facemap_root_data_dir() -> list + Retrieves the root data directory(s) with face recordings for all + subject/sessions. Returns a string for the full path to the root data directory. + + get_facemap_processed_data_dir(session_key: dict) -> str + Optional function to retrieve the desired output directory + for Facemap files for a given session. If unspecified, + the output is stored in the video folder for the session, which is the default behavior of Facemap. + Returns a string of the absolute path of the output directory. 
+ + """ + + if isinstance(linking_module, str): + linking_module = importlib.import_module(linking_module) + assert inspect.ismodule( + linking_module + ), "The argument 'dependency' must be a module's name or a module" + assert hasattr( + linking_module, "get_dlc_root_data_dir" + ), "The linking module must specify a lookup function for a root data directory" + + global _linking_module + _linking_module = linking_module + + # activate facial behavioral extimation (fbe) schema + fbe.activate( + fbe_schema_name, + create_schema=create_schema, + create_tables=create_tables, + linking_module=linking_module, + ) + + # activate facial pose model schema + facemap_pose.activate( + facemap_model_schema_name, + create_schema=create_schema, + create_tables=create_tables, + add_objects=_linking_module.__dict__, + ) + + # activate facemap train schema + schema.activate( + facemap_train_schema_name, + create_schema=create_schema, + create_tables=create_tables, + add_objects=_linking_module.__dict__, + ) + + +# ----------------------------- Table declarations ---------------------- + + +@schema +class FacemapTrainVideoSet(dj.Manual): + """Collection of videos included in a given training set. + + Attributes: + video_set_id (int): Unique ID for each collection of videos.""" + + definition = """ # Set of vids in training set + video_set_id: int + """ + + class File(dj.Part): + """File IDs and paths in a given TrainVideoSet + + Attributes: + VideoSet (foreign key): VideoSet key. + file_path ( varchar(255) ): Path to file on disk relative to root.""" + + definition = """ # Paths of training files (e.g., .avi, .mp4, .npy video/ param files) + -> master + file_id: int + --- + file_path: varchar(255) + """ + + +@schema +class FacemapTrainParamSet(dj.Lookup): + """Parameters used to train a model, initial ROIs from (_proc.npy) + + Attributes: + paramset_idx (smallint): Index uniqely identifying each paramset. + paramset_desc ( varchar(128) ): Description of paramset. + param_set_hash (uuid): Hash identifying this paramset. + params (longblob): Dictionary of all applicable parameters. + Note: param_set_hash must be unique.""" + + definition = """ + # Parameters to specify a facemap model training instance + paramset_idx : smallint + --- + keypoints_filename : varchar(255) # + paramset_desc : varchar(128) # Description of parameterset used for + param_set_hash : uuid # hash identifying this paramset + unique index (param_set_hash) + params : longblob # numpy array of initially selected ROIs + """ + + @classmethod + def insert_new_params( + cls, paramset_desc: str, params: dict, paramset_idx: int = None + ): + """ + Insert a new set of training parameters into dlc.TrainingParamSet. + + Args: + paramset_desc (str): Description of parameter set to be inserted + params (dict): Dictionary including all settings to specify model training. + Must include shuffle & trainingsetindex b/c not in config.yaml. + project_path and video_sets will be overwritten by config.yaml. + Note that trainingsetindex is 0-indexed + paramset_idx (int): optional, integer to represent parameters. 
+ """ + + if paramset_idx is None: + paramset_idx = ( + dj.U().aggr(cls, n="max(paramset_idx)").fetch1("n") or 0 + ) + 1 + + param_dict = { + "paramset_idx": paramset_idx, + "paramset_desc": paramset_desc, + "params": params, + "param_set_hash": dict_to_uuid(params), + } + param_query = cls & {"param_set_hash": param_dict["param_set_hash"]} + # If the specified param-set already exists + if param_query: + existing_paramset_idx = param_query.fetch1("paramset_idx") + if existing_paramset_idx == int(paramset_idx): # If existing_idx same: + return # job done + else: + cls.insert1(param_dict) # if duplicate, will raise duplicate error + + + +@schema +class FacemapModelTrainingTask(dj.Manual): + """Staging table for pairing videosets and training parameter sets + + Attributes: + FacemapTrainVideoSet (foreign key): FacemapTrainVideoSet Key. + FacemapTrainingParamSet (foreign key): TrainingParamSet key. + training_task_id (int): Unique ID for training task. + train_output_dir( varchar(255) ): Relative output directory for trained model + refined_model_name ( varchar(32) ): Name for retrained model + model_id (smallint): Unique Model index to be inserted into FacemapModel table + + """ + + definition = """ # Specification for a facemap model training instance + -> FacemapTrainVideoSet # video(s) for training + -> FacemapTrainingParamSet # Initially specified ROIs + training_task_id : smallint + --- + train_output_dir : varchar(255) # Trained model output directory + refined_model_name='refined_model' : varchar(32) + model_id : smallint # Model index for insertion into FacemapModel table + model_description : varchar(255) # Optional, model desc for insertion into FacemapModel + selected_frame_ind=None : smallblob # Array of frames to run training on + """ + def infer_output_dir(self, key, relative=True, mkdir=True): + video_file = (fbe.VideoRecording.File & key).fetch("file_path", limit=1)[0] + video_dir = find_full_path(fbe.get_facemap_root_data_dir(), video_file).parent + root_dir = find_root_directory(fbe.get_facemap_root_data_dir(), video_dir) + + paramset_idx = (FacemapModelTrainingTask & key).fetch1("paramset_idx") + processed_dir = Path(fbe.get_facemap_processed_data_dir()) + output_dir = ( + processed_dir / video_dir.relative_to(root_dir) / f"facemap_train_{paramset_idx}" + ) + + if mkdir: + output_dir.mkdir(parents=True, exist_ok=True) + + return output_dir.relative_to(processed_dir) if relative else output_dir + + +@schema +class FacemapModelTraining(dj.Computed): + """Automated Model training information. + + Attributes: + FacemapModelTrainingTask (foreign key): FacemapModelTrainingTask key. + train_model_time (datetime): Time of creation of newly trained model + latest_snapshot (int unsigned): Latest exact snapshot index (i.e., never -1). 
+ config_template (longblob): Stored full config file.""" + + definition = """ + -> FacemapModelTrainingTask + --- + train_model_time: datetime + + """ + + def make(self, key): + from facemap.pose import pose as facemap_pose + from facemap import utils + import cv2 + import torch + output_dir = find_full_path(fbe.get_facemap_root_data_dir(), output_dir) + video_files = [find_full_path(fbe.get_facemap_root_data_dir(), fp).as_posix() for fp in (FacemapTrainVideoSet.File & {'video_set_id': key['video_set_id']}).fetch("file_path")] + paramset_idx = (FacemapModelTrainingTask & key).fetch('paramset_idx') + + + # Create a pose model object, specifying the video files + train_model = facemap_pose.Pose(filenames=[video_files]) + + # Run pose prediction setup to set facemap default model to train_model.net + train_model.pose_prediction_setup() + + + # Convert videos to images for train input + + + pre_selected_frame_ind = (FacemapModelTrainingTask & key).fetch1('selected_frame_ind') + image_data = [] + for video_file in video_files: + if len(pre_selected_frame_ind) == 0: # set selected frames to all frames + + cap = cv2.VideoCapture(video_file) + selected_frame_ind = np.arange(int(cap.get(cv2.CAP_PROP_FRAME_COUNT))) + else: + selected_frame_ind = pre_selected_frame_ind + + image_data.append(utils.load_images_from_video(video_file, selected_frame_ind)) + + # cumframes, Ly, Lx, containers = utils.get_frame_details(video_files) + + # LY, LX, sy, sx = utils.video_placement(Ly, Lx) + + # reshaped_videos = utils.multivideo_reshape(image_data, LY, LX, Ly, Lx, sy, sx) + + # LIMIT TO SINGLE VIDEO TRAIN for now, can implement multi video later + single_video_data = image_data[0][:,:,:,0] + + + # Can use existing keypoints data stored facemap_pose schema + keypoints_data = (facemap_pose.FacemapPoseEstimation.BodyPartPosition).fetch(as_dict=True) + + # This works, but we would need to store Files in the facial pose model as well, + keypoints_data = utils.load_keypoints(facemap_pose.BodyPart.contents, h5_file) + + # Model Parameters (fetch from TrainingParamSet as dict) + training_params = (FacemapTrainParamSet & f'paramset_idx={paramset_idx}').fetch1('params') + refined_model_name = (FacemapModelTrainingTask & key).fetch1('refined_model_name') # default = "refined_model" + + # Train model + train_model.net = train_model.train(image_data[0][:,:,:,0], + keypoints_data.T, # needs to be transposed + int(training_params['epochs']), + int(training_params['batch_size']), + float(training_params['learning_rate']), + int(training_params['weight_decay']), + bbox=training_params['bbox']) + + + # Save Refined Model + model_output_path = output_dir / f'{refined_model_name}.pth' + torch.save(train_model.net.state_dict(), model_output_path) + + model_id = (FacemapModelTrainingTask & key).fetch1('model_id') + model_description = (FacemapModelTrainingTask & key).fetch1('model_description') + + # Insert newly trained model results into FacemapModel table + try: + model_ids = facemap_pose.FacemapModel.fetch("model_id") + except ValueError: # case that nothing has been inserted + model_id = 0 + if len(model_id) == 0 or model_id in model_ids: + model_id = max(model_ids) + 1 + + model_insert = dict(model_id=model_id, + model_name=refined_model_name, + model_description=model_description) + model_file_insert = dict(model_id=model_id, model_file=model_output_path) + + facemap_pose.FacemapModel.insert_new_model(model_insert) + facemap_pose.FacemapModel.File + + train_model_time = 
datetime.fromtimestamp(model_output_path.stat().st_mtime).strftime( + "%Y-%m-%d %H:%M:%S" + ) + + self.insert1( + {**key, 'train_model_time': train_model_time} + ) From b8fa30ad6ce6dbd04eaa265ae62d94da789b5862 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Fri, 29 Sep 2023 20:06:11 -0500 Subject: [PATCH 072/182] support single video --- element_facemap/train_facial_model.py | 91 +++++++++++++++++++-------- 1 file changed, 66 insertions(+), 25 deletions(-) diff --git a/element_facemap/train_facial_model.py b/element_facemap/train_facial_model.py index 697aec4..fe5694a 100644 --- a/element_facemap/train_facial_model.py +++ b/element_facemap/train_facial_model.py @@ -96,8 +96,8 @@ def activate( @schema -class FacemapTrainVideoSet(dj.Manual): - """Collection of videos included in a given training set. +class FacemapTrainFileSet(dj.Manual): + """Collection of files associated with a given training set. Attributes: video_set_id (int): Unique ID for each collection of videos.""" @@ -113,7 +113,7 @@ class File(dj.Part): VideoSet (foreign key): VideoSet key. file_path ( varchar(255) ): Path to file on disk relative to root.""" - definition = """ # Paths of training files (e.g., .avi, .mp4, .npy video/ param files) + definition = """ # Paths of training files (e.g., .avi, .mp4 video files, .h5 keypoints data file) -> master file_id: int --- @@ -136,7 +136,6 @@ class FacemapTrainParamSet(dj.Lookup): # Parameters to specify a facemap model training instance paramset_idx : smallint --- - keypoints_filename : varchar(255) # paramset_desc : varchar(128) # Description of parameterset used for param_set_hash : uuid # hash identifying this paramset unique index (param_set_hash) @@ -203,6 +202,7 @@ class FacemapModelTrainingTask(dj.Manual): train_output_dir : varchar(255) # Trained model output directory refined_model_name='refined_model' : varchar(32) model_id : smallint # Model index for insertion into FacemapModel table + retrain_model_id : smallint # Model index for loading of model_description : varchar(255) # Optional, model desc for insertion into FacemapModel selected_frame_ind=None : smallblob # Array of frames to run training on """ @@ -246,9 +246,15 @@ def make(self, key): import cv2 import torch output_dir = find_full_path(fbe.get_facemap_root_data_dir(), output_dir) - video_files = [find_full_path(fbe.get_facemap_root_data_dir(), fp).as_posix() for fp in (FacemapTrainVideoSet.File & {'video_set_id': key['video_set_id']}).fetch("file_path")] + + train_fileset = [find_full_path(fbe.get_facemap_root_data_dir(), fp).as_posix() + for fp in (FacemapTrainFileSet.File & + {'video_set_id': key['video_set_id']}).fetch("file_path")] paramset_idx = (FacemapModelTrainingTask & key).fetch('paramset_idx') + video_suffixes = ['.mp4','.avi'] + h5_filepaths = [f for f in train_fileset if f.endswith('.h5')] + video_files = [f for f in train_fileset if any(f.endswith(s) for s in video_suffixes)] # Create a pose model object, specifying the video files train_model = facemap_pose.Pose(filenames=[video_files]) @@ -258,42 +264,53 @@ def make(self, key): # Convert videos to images for train input + pre_selected_frame_ind = (FacemapModelTrainingTask & key).fetch1('selected_frame_ind') + + # Only support single video training + assert len(video_files) == 1 - pre_selected_frame_ind = (FacemapModelTrainingTask & key).fetch1('selected_frame_ind') - image_data = [] - for video_file in video_files: - if len(pre_selected_frame_ind) == 0: # set selected frames to all frames + video_file = video_files[0] + if 
len(pre_selected_frame_ind) == 0: # set selected frames to all frames - cap = cv2.VideoCapture(video_file) - selected_frame_ind = np.arange(int(cap.get(cv2.CAP_PROP_FRAME_COUNT))) - else: - selected_frame_ind = pre_selected_frame_ind + cap = cv2.VideoCapture(video_file) + selected_frame_ind = np.arange(int(cap.get(cv2.CAP_PROP_FRAME_COUNT))) + else: + selected_frame_ind = pre_selected_frame_ind + image_data = utils.load_images_from_video(video_file, selected_frame_ind) - image_data.append(utils.load_images_from_video(video_file, selected_frame_ind)) + # MULTIVIDEO TODO + # image_data = [] + # for video_file in video_files: + # if len(pre_selected_frame_ind) == 0: # set selected frames to all frames - # cumframes, Ly, Lx, containers = utils.get_frame_details(video_files) + # cap = cv2.VideoCapture(video_file) + # selected_frame_ind = np.arange(int(cap.get(cv2.CAP_PROP_FRAME_COUNT))) + # else: + # selected_frame_ind = pre_selected_frame_ind + + # image_data.append(utils.load_images_from_video(video_file, selected_frame_ind)) + # -- For multivideo image data reshaping + # cumframes, Ly, Lx, containers = utils.get_frame_details(video_files) # LY, LX, sy, sx = utils.video_placement(Ly, Lx) - # reshaped_videos = utils.multivideo_reshape(image_data, LY, LX, Ly, Lx, sy, sx) - # LIMIT TO SINGLE VIDEO TRAIN for now, can implement multi video later - single_video_data = image_data[0][:,:,:,0] - # Can use existing keypoints data stored facemap_pose schema - keypoints_data = (facemap_pose.FacemapPoseEstimation.BodyPartPosition).fetch(as_dict=True) + # keypoints_data = (facemap_pose.FacemapPoseEstimation.BodyPartPosition).fetch(as_dict=True) + + keypoints_file = (FacemapModelTrainingTask & key).fetch('keypoints_filename') # This works, but we would need to store Files in the facial pose model as well, - keypoints_data = utils.load_keypoints(facemap_pose.BodyPart.contents, h5_file) + keypoints_data = utils.load_keypoints(facemap_pose.BodyPart.contents, keypoints_file) # Model Parameters (fetch from TrainingParamSet as dict) training_params = (FacemapTrainParamSet & f'paramset_idx={paramset_idx}').fetch1('params') refined_model_name = (FacemapModelTrainingTask & key).fetch1('refined_model_name') # default = "refined_model" - # Train model - train_model.net = train_model.train(image_data[0][:,:,:,0], + # Train model using train function defined in Pose class + train_model.net = train_model.train(image_data[:,:,:,0], keypoints_data.T, # needs to be transposed int(training_params['epochs']), int(training_params['batch_size']), @@ -302,6 +319,30 @@ def make(self, key): bbox=training_params['bbox']) + # Alternate (requires more imports, but allows for access to training object that can be used for cross validation) + from facemap.pose import model_training, datasets + + dataset = datasets.FacemapDataset( + image_data=image_data, + keypoints_data=keypoints_data.T, + bbox=training_params['bbox'], + ) + # Create a dataloader object for training + dataloader = torch.utils.data.DataLoader( + dataset, batch_size=int(training_params['batch_size']), shuffle=True + ) + # Use preprocessed data to train the model + train_model.net = model_training.train( + dataloader, + train_model.net, + int(training_params['epochs']), + int(training_params['weight_decay']), + ) + print("Model training complete!") + return self.net + + + # Save Refined Model model_output_path = output_dir / f'{refined_model_name}.pth' torch.save(train_model.net.state_dict(), model_output_path) @@ -312,10 +353,10 @@ def make(self, key): # Insert newly 
trained model results into FacemapModel table try: model_ids = facemap_pose.FacemapModel.fetch("model_id") + if len(model_id) == 0 or model_id in model_ids: + model_id = max(model_ids) + 1 except ValueError: # case that nothing has been inserted model_id = 0 - if len(model_id) == 0 or model_id in model_ids: - model_id = max(model_ids) + 1 model_insert = dict(model_id=model_id, model_name=refined_model_name, From 6e67fc22913c4e1188b9bca0c0e8e49b8d6344d2 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Fri, 29 Sep 2023 22:51:43 -0500 Subject: [PATCH 073/182] comment second training approach --- element_facemap/train_facial_model.py | 49 +++++++++++++-------------- 1 file changed, 24 insertions(+), 25 deletions(-) diff --git a/element_facemap/train_facial_model.py b/element_facemap/train_facial_model.py index fe5694a..3d24959 100644 --- a/element_facemap/train_facial_model.py +++ b/element_facemap/train_facial_model.py @@ -236,8 +236,8 @@ class FacemapModelTraining(dj.Computed): definition = """ -> FacemapModelTrainingTask --- - train_model_time: datetime - + train_model_time : datetime # Time of creation of train model file + train_model : longblob # Dictionary containing model.net state """ def make(self, key): @@ -319,30 +319,29 @@ def make(self, key): bbox=training_params['bbox']) - # Alternate (requires more imports, but allows for access to training object that can be used for cross validation) - from facemap.pose import model_training, datasets - - dataset = datasets.FacemapDataset( - image_data=image_data, - keypoints_data=keypoints_data.T, - bbox=training_params['bbox'], - ) - # Create a dataloader object for training - dataloader = torch.utils.data.DataLoader( - dataset, batch_size=int(training_params['batch_size']), shuffle=True - ) - # Use preprocessed data to train the model - train_model.net = model_training.train( - dataloader, - train_model.net, - int(training_params['epochs']), - int(training_params['weight_decay']), - ) - print("Model training complete!") - return self.net + # Alternate (requires more imports, but allows for access to model_training object that can be used for cross validation) + # from facemap.pose import model_training, datasets + + # dataset = datasets.FacemapDataset( + # image_data=image_data, + # keypoints_data=keypoints_data.T, + # bbox=training_params['bbox'], + # ) + # # Create a dataloader object for training + # dataloader = torch.utils.data.DataLoader( + # dataset, batch_size=int(training_params['batch_size']), shuffle=True + # ) + # # Use preprocessed data to train the model + # train_model.net = model_training.train( + # dataloader, + # train_model.net, + # int(training_params['epochs']), + # int(training_params['weight_decay']), + # ) + + # pred_keypoints, keypoints = model_training.get_test_predictions(train_model.net, test_dataset) - # Save Refined Model model_output_path = output_dir / f'{refined_model_name}.pth' torch.save(train_model.net.state_dict(), model_output_path) @@ -371,5 +370,5 @@ def make(self, key): ) self.insert1( - {**key, 'train_model_time': train_model_time} + {**key, 'train_model_time': train_model_time, 'train_model': train_model.net.state_dict()} ) From 575fbd4862e2cd28ec0ab6fb7eab72159c0ad594 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Sun, 1 Oct 2023 17:34:35 -0500 Subject: [PATCH 074/182] bugfix --- element_facemap/train_facial_model.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/element_facemap/train_facial_model.py b/element_facemap/train_facial_model.py index 
3d24959..bf32236 100644 --- a/element_facemap/train_facial_model.py +++ b/element_facemap/train_facial_model.py @@ -61,7 +61,7 @@ def activate( linking_module ), "The argument 'dependency' must be a module's name or a module" assert hasattr( - linking_module, "get_dlc_root_data_dir" + linking_module, "get_facemap_root_data_dir" ), "The linking module must specify a lookup function for a root data directory" global _linking_module @@ -185,7 +185,7 @@ class FacemapModelTrainingTask(dj.Manual): """Staging table for pairing videosets and training parameter sets Attributes: - FacemapTrainVideoSet (foreign key): FacemapTrainVideoSet Key. + FacemapTrainFileSet (foreign key): FacemapTrainFileSet Key. FacemapTrainingParamSet (foreign key): TrainingParamSet key. training_task_id (int): Unique ID for training task. train_output_dir( varchar(255) ): Relative output directory for trained model @@ -195,7 +195,7 @@ class FacemapModelTrainingTask(dj.Manual): """ definition = """ # Specification for a facemap model training instance - -> FacemapTrainVideoSet # video(s) for training + -> FacemapTrainFileSet # video(s) and files for training -> FacemapTrainingParamSet # Initially specified ROIs training_task_id : smallint --- From ed64222b10531054990e45946ca02e0672b99314 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Sun, 1 Oct 2023 18:00:00 -0500 Subject: [PATCH 075/182] remove add objects from imported schema activate --- element_facemap/train_facial_model.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/element_facemap/train_facial_model.py b/element_facemap/train_facial_model.py index bf32236..62dfbb4 100644 --- a/element_facemap/train_facial_model.py +++ b/element_facemap/train_facial_model.py @@ -80,7 +80,7 @@ def activate( facemap_model_schema_name, create_schema=create_schema, create_tables=create_tables, - add_objects=_linking_module.__dict__, + linking_module=linking_module, ) # activate facemap train schema From e7d4a622b13403f72678e631e65abf4d108463e3 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Sun, 1 Oct 2023 18:04:38 -0500 Subject: [PATCH 076/182] fix typo training to train --- element_facemap/train_facial_model.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/element_facemap/train_facial_model.py b/element_facemap/train_facial_model.py index 62dfbb4..3441ef5 100644 --- a/element_facemap/train_facial_model.py +++ b/element_facemap/train_facial_model.py @@ -186,7 +186,7 @@ class FacemapModelTrainingTask(dj.Manual): Attributes: FacemapTrainFileSet (foreign key): FacemapTrainFileSet Key. - FacemapTrainingParamSet (foreign key): TrainingParamSet key. + FacemapTrainParamSet (foreign key): TrainingParamSet key. training_task_id (int): Unique ID for training task. 
train_output_dir( varchar(255) ): Relative output directory for trained model refined_model_name ( varchar(32) ): Name for retrained model @@ -196,7 +196,7 @@ class FacemapModelTrainingTask(dj.Manual): definition = """ # Specification for a facemap model training instance -> FacemapTrainFileSet # video(s) and files for training - -> FacemapTrainingParamSet # Initially specified ROIs + -> FacemapTrainParamSet # Initially specified ROIs training_task_id : smallint --- train_output_dir : varchar(255) # Trained model output directory From ba4b2f251a118d8f554d1b0a3b7a63b9ca3df1b4 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Sun, 1 Oct 2023 18:13:52 -0500 Subject: [PATCH 077/182] modify training task table definition --- element_facemap/train_facial_model.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/element_facemap/train_facial_model.py b/element_facemap/train_facial_model.py index 3441ef5..539fccd 100644 --- a/element_facemap/train_facial_model.py +++ b/element_facemap/train_facial_model.py @@ -195,8 +195,8 @@ class FacemapModelTrainingTask(dj.Manual): """ definition = """ # Specification for a facemap model training instance - -> FacemapTrainFileSet # video(s) and files for training - -> FacemapTrainParamSet # Initially specified ROIs + -> FacemapTrainFileSet # video(s) and files for training + -> FacemapTrainParamSet # Initially specified ROIs training_task_id : smallint --- train_output_dir : varchar(255) # Trained model output directory @@ -204,7 +204,7 @@ class FacemapModelTrainingTask(dj.Manual): model_id : smallint # Model index for insertion into FacemapModel table retrain_model_id : smallint # Model index for loading of model_description : varchar(255) # Optional, model desc for insertion into FacemapModel - selected_frame_ind=None : smallblob # Array of frames to run training on + selected_frame_ind : smallblob # Array of frames to run training on """ def infer_output_dir(self, key, relative=True, mkdir=True): video_file = (fbe.VideoRecording.File & key).fetch("file_path", limit=1)[0] From c019906bc7feccff0fe1a7394bec3f7c041b4b4c Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Sun, 1 Oct 2023 18:20:27 -0500 Subject: [PATCH 078/182] update comments --- element_facemap/train_facial_model.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/element_facemap/train_facial_model.py b/element_facemap/train_facial_model.py index 539fccd..3e44d4d 100644 --- a/element_facemap/train_facial_model.py +++ b/element_facemap/train_facial_model.py @@ -107,10 +107,10 @@ class FacemapTrainFileSet(dj.Manual): """ class File(dj.Part): - """File IDs and paths in a given TrainVideoSet + """File IDs and paths in a given FacemapTrainFileSet Attributes: - VideoSet (foreign key): VideoSet key. + FacemapTrainFileSet (foreign key): FacemapTrainFileSet key. 
file_path ( varchar(255) ): Path to file on disk relative to root.""" definition = """ # Paths of training files (e.g., .avi, .mp4 video files, .h5 keypoints data file) From 1b3b3eccd98d47f83806ac29468aec9f71e9afc8 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Sun, 1 Oct 2023 18:22:05 -0500 Subject: [PATCH 079/182] fix comment --- element_facemap/train_facial_model.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/element_facemap/train_facial_model.py b/element_facemap/train_facial_model.py index 3e44d4d..d72f772 100644 --- a/element_facemap/train_facial_model.py +++ b/element_facemap/train_facial_model.py @@ -166,8 +166,8 @@ def insert_new_params( param_dict = { "paramset_idx": paramset_idx, "paramset_desc": paramset_desc, - "params": params, "param_set_hash": dict_to_uuid(params), + "params": params, } param_query = cls & {"param_set_hash": param_dict["param_set_hash"]} # If the specified param-set already exists From 00d9c8566b178d1b986dcd6770013cfd666aa0a5 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Sun, 1 Oct 2023 18:37:13 -0500 Subject: [PATCH 080/182] change smallblob to tinyblob typo --- element_facemap/train_facial_model.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/element_facemap/train_facial_model.py b/element_facemap/train_facial_model.py index d72f772..f795e8d 100644 --- a/element_facemap/train_facial_model.py +++ b/element_facemap/train_facial_model.py @@ -204,7 +204,7 @@ class FacemapModelTrainingTask(dj.Manual): model_id : smallint # Model index for insertion into FacemapModel table retrain_model_id : smallint # Model index for loading of model_description : varchar(255) # Optional, model desc for insertion into FacemapModel - selected_frame_ind : smallblob # Array of frames to run training on + selected_frame_ind : tinyblob # Array of frames to run training on """ def infer_output_dir(self, key, relative=True, mkdir=True): video_file = (fbe.VideoRecording.File & key).fetch("file_path", limit=1)[0] From ee49c93ff9d6ed5b955f9fbed4c58523c1be3fb3 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Sun, 1 Oct 2023 19:52:59 -0500 Subject: [PATCH 081/182] add facemap task insertion class method --- element_facemap/train_facial_model.py | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) diff --git a/element_facemap/train_facial_model.py b/element_facemap/train_facial_model.py index f795e8d..e2848da 100644 --- a/element_facemap/train_facial_model.py +++ b/element_facemap/train_facial_model.py @@ -207,8 +207,8 @@ class FacemapModelTrainingTask(dj.Manual): selected_frame_ind : tinyblob # Array of frames to run training on """ def infer_output_dir(self, key, relative=True, mkdir=True): - video_file = (fbe.VideoRecording.File & key).fetch("file_path", limit=1)[0] - video_dir = find_full_path(fbe.get_facemap_root_data_dir(), video_file).parent + video_files = (FacemapTrainFileSet.File & key).fetch("file_path", limit=1)[0] + video_dir = find_full_path(fbe.get_facemap_root_data_dir(), video_files[0]).parent root_dir = find_root_directory(fbe.get_facemap_root_data_dir(), video_dir) paramset_idx = (FacemapModelTrainingTask & key).fetch1("paramset_idx") @@ -222,7 +222,20 @@ def infer_output_dir(self, key, relative=True, mkdir=True): return output_dir.relative_to(processed_dir) if relative else output_dir + @classmethod + def insert_facemap_training_task(cls, key, training_task_id, refined_model_name, model_description, selected_frame_ind, train_output_dir, model_id=None): + + vrec_key = 
(fbe.VideoRecording & key).fetch('key') + facemap_training_task_insert = dict(**key, + training_task_id=training_task_id, + train_output_dir=train_output_dir, + refined_model_name=refined_model_name, + selected_frame_ind=selected_frame_ind, + model_description=model_description, + model_id=model_id) + cls.infer_output_dir(vrec_key) + @schema class FacemapModelTraining(dj.Computed): """Automated Model training information. @@ -303,7 +316,7 @@ def make(self, key): keypoints_file = (FacemapModelTrainingTask & key).fetch('keypoints_filename') # This works, but we would need to store Files in the facial pose model as well, - keypoints_data = utils.load_keypoints(facemap_pose.BodyPart.contents, keypoints_file) + keypoints_data = utils.load_keypoints('pafacemap_pose.BodyPart.contents, keypoints_file) # Model Parameters (fetch from TrainingParamSet as dict) training_params = (FacemapTrainParamSet & f'paramset_idx={paramset_idx}').fetch1('params') From 23ccc591019231ab281916721727fca62ff0947e Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Mon, 2 Oct 2023 11:08:59 -0500 Subject: [PATCH 082/182] fix output dir --- element_facemap/train_facial_model.py | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/element_facemap/train_facial_model.py b/element_facemap/train_facial_model.py index e2848da..eb729f3 100644 --- a/element_facemap/train_facial_model.py +++ b/element_facemap/train_facial_model.py @@ -202,9 +202,9 @@ class FacemapModelTrainingTask(dj.Manual): train_output_dir : varchar(255) # Trained model output directory refined_model_name='refined_model' : varchar(32) model_id : smallint # Model index for insertion into FacemapModel table - retrain_model_id : smallint # Model index for loading of + retrain_model_id : smallint # Model index of model to be loaded for retraining model_description : varchar(255) # Optional, model desc for insertion into FacemapModel - selected_frame_ind : tinyblob # Array of frames to run training on + selected_frame_ind : blob # Array of frames to run training on """ def infer_output_dir(self, key, relative=True, mkdir=True): video_files = (FacemapTrainFileSet.File & key).fetch("file_path", limit=1)[0] @@ -228,7 +228,7 @@ def insert_facemap_training_task(cls, key, training_task_id, refined_model_name, vrec_key = (fbe.VideoRecording & key).fetch('key') facemap_training_task_insert = dict(**key, training_task_id=training_task_id, - train_output_dir=train_output_dir, + train_output_dir=train_output_dir.relative_to(fbe.get_facemap_root_data_dir()), refined_model_name=refined_model_name, selected_frame_ind=selected_frame_ind, model_description=model_description, @@ -258,7 +258,9 @@ def make(self, key): from facemap import utils import cv2 import torch - output_dir = find_full_path(fbe.get_facemap_root_data_dir(), output_dir) + + train_output_dir = (FacemapModelTrainingTask & key).fetch1('train_output_dir') + output_dir = find_full_path(fbe.get_facemap_root_data_dir(), train_output_dir) train_fileset = [find_full_path(fbe.get_facemap_root_data_dir(), fp).as_posix() for fp in (FacemapTrainFileSet.File & @@ -280,7 +282,7 @@ def make(self, key): pre_selected_frame_ind = (FacemapModelTrainingTask & key).fetch1('selected_frame_ind') - # Only support single video training + # Currently, only support single video training assert len(video_files) == 1 video_file = video_files[0] @@ -316,7 +318,7 @@ def make(self, key): keypoints_file = (FacemapModelTrainingTask & key).fetch('keypoints_filename') # This works, but we would need to store Files in 
the facial pose model as well, - keypoints_data = utils.load_keypoints('pafacemap_pose.BodyPart.contents, keypoints_file) + keypoints_data = utils.load_keypoints(facemap_pose.BodyPart.contents, keypoints_file) # Model Parameters (fetch from TrainingParamSet as dict) training_params = (FacemapTrainParamSet & f'paramset_idx={paramset_idx}').fetch1('params') From 99064cde364fa62e78bb6f27350dbce4faaad9ae Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Tue, 3 Oct 2023 20:24:51 -0500 Subject: [PATCH 083/182] restructure keypoints file --- element_facemap/train_facial_model.py | 123 +++++++++++++++++--------- 1 file changed, 81 insertions(+), 42 deletions(-) diff --git a/element_facemap/train_facial_model.py b/element_facemap/train_facial_model.py index eb729f3..4c8684b 100644 --- a/element_facemap/train_facial_model.py +++ b/element_facemap/train_facial_model.py @@ -100,20 +100,36 @@ class FacemapTrainFileSet(dj.Manual): """Collection of files associated with a given training set. Attributes: - video_set_id (int): Unique ID for each collection of videos.""" + file_set_id (int): Unique ID for each collection of training files.""" definition = """ # Set of vids in training set - video_set_id: int + file_set_id: int """ - class File(dj.Part): - """File IDs and paths in a given FacemapTrainFileSet + class VideoFile(dj.Part): + """Video File IDs and paths in a given FacemapTrainFileSet Attributes: - FacemapTrainFileSet (foreign key): FacemapTrainFileSet key. - file_path ( varchar(255) ): Path to file on disk relative to root.""" + FacemapTrainFileSet (foreign key) : FacemapTrainFileSet key. + video_file_id (int) : Video File index + video_file_path ( varchar(255) ) : Path to file on disk relative to root.""" - definition = """ # Paths of training files (e.g., .avi, .mp4 video files, .h5 keypoints data file) + definition = """ # Paths of training files (e.g., .avi, .mp4 video files) + -> master + video_file_id: int + --- + video_file_path: varchar(255) + """ + + class KeypointsFile(dj.Part): + """Non video File IDs and paths in a given FacemapTrainFileSet + + Attributes: + FacemapTrainFileSet (foreign key) : FacemapTrainFileSet key. + file_id : Keypoint File index. 
+ file_path ( varchar(255) ) : Path to file on disk relative to root.""" + + definition = """ # Paths of training files (e.g.: .h5 keypoints data file) -> master file_id: int --- @@ -204,17 +220,17 @@ class FacemapModelTrainingTask(dj.Manual): model_id : smallint # Model index for insertion into FacemapModel table retrain_model_id : smallint # Model index of model to be loaded for retraining model_description : varchar(255) # Optional, model desc for insertion into FacemapModel - selected_frame_ind : blob # Array of frames to run training on + selected_frame_ind : blob # Array of frames to run training on + keypoints_filename : varchar(64) # Specify keypoints filename if multiple keypoints files are stored """ def infer_output_dir(self, key, relative=True, mkdir=True): - video_files = (FacemapTrainFileSet.File & key).fetch("file_path", limit=1)[0] - video_dir = find_full_path(fbe.get_facemap_root_data_dir(), video_files[0]).parent + video_file = (FacemapTrainFileSet.File & key).fetch("file_path", limit=1)[0] + video_dir = find_full_path(fbe.get_facemap_root_data_dir(), video_file).parent root_dir = find_root_directory(fbe.get_facemap_root_data_dir(), video_dir) - paramset_idx = (FacemapModelTrainingTask & key).fetch1("paramset_idx") processed_dir = Path(fbe.get_facemap_processed_data_dir()) output_dir = ( - processed_dir / video_dir.relative_to(root_dir) / f"facemap_train_{paramset_idx}" + processed_dir / video_dir.relative_to(root_dir) / f"facemap_train_{key['paramset_idx']}" ) if mkdir: @@ -223,18 +239,28 @@ def infer_output_dir(self, key, relative=True, mkdir=True): return output_dir.relative_to(processed_dir) if relative else output_dir @classmethod - def insert_facemap_training_task(cls, key, training_task_id, refined_model_name, model_description, selected_frame_ind, train_output_dir, model_id=None): - - vrec_key = (fbe.VideoRecording & key).fetch('key') + def insert_facemap_training_task(cls, + file_set_key, + training_task_id, + paramset_idx, + refined_model_name, + model_description, + selected_frame_ind, + model_id=None, + retrain_model_id=None): + key = {**file_set_key, "paramset_idx": paramset_idx} + inferred_output_dir = cls.infer_output_dir(key, relative=True, mkdir=True) facemap_training_task_insert = dict(**key, training_task_id=training_task_id, - train_output_dir=train_output_dir.relative_to(fbe.get_facemap_root_data_dir()), + train_output_dir=inferred_output_dir, refined_model_name=refined_model_name, selected_frame_ind=selected_frame_ind, model_description=model_description, - model_id=model_id) - cls.infer_output_dir(vrec_key) - + model_id=model_id, + retrain_model_id=retrain_model_id) + + facemap_training_task_insert.update({'train_output_dir': inferred_output_dir.as_posix()}) + cls.insert1(facemap_training_task_insert) @schema class FacemapModelTraining(dj.Computed): @@ -262,25 +288,41 @@ def make(self, key): train_output_dir = (FacemapModelTrainingTask & key).fetch1('train_output_dir') output_dir = find_full_path(fbe.get_facemap_root_data_dir(), train_output_dir) + video_files = [find_full_path(fbe.get_facemap_root_data_dir(), fp).as_posix() + for fp in (FacemapTrainFileSet.VideoFile & + {'video_set_id': key['video_set_id']}).fetch("file_path")] + + # manually specified .h5 keypoints file train_fileset = [find_full_path(fbe.get_facemap_root_data_dir(), fp).as_posix() for fp in (FacemapTrainFileSet.File & - {'video_set_id': key['video_set_id']}).fetch("file_path")] - paramset_idx = (FacemapModelTrainingTask & key).fetch('paramset_idx') + {'file_set_id': 
key['video_set_id']}).fetch("file_path")] + + keypoints_file_name = (FacemapModelTrainingTask & key).fetch1("keypoints_filename") + + keypoints_file = [f for f in train_fileset if keypoints_file_name in f] + if len(keypoints_file) > 0: + keypoints_file = keypoints_file[0] # if multiple keypoints files are specified select first file - video_suffixes = ['.mp4','.avi'] h5_filepaths = [f for f in train_fileset if f.endswith('.h5')] - video_files = [f for f in train_fileset if any(f.endswith(s) for s in video_suffixes)] + retrain_model_id = key['retrain_model_id'] # Create a pose model object, specifying the video files - train_model = facemap_pose.Pose(filenames=[video_files]) - - # Run pose prediction setup to set facemap default model to train_model.net - train_model.pose_prediction_setup() - + train_model = facemap_pose.Pose(filename=[video_files]) + train_model.pose_prediction_setup() # Sets default facemap model as train_model.net, handles empty bbox + + if len(retrain_model_id) > 0: # Retrain an existing model from the facemap_pose.FacemapModel table + # Fetch model file attachment so that model_file (.pth) is availible in Path.cwd() + model_file = (facemap_pose.FacemapModel.File & {'model_id': retrain_model_id}).fetch1("model_file") + + # Set train_model object to load preexisting model + train_model.model_name = model_file + # Overwrite default train_model.net + train_model.net.load_state_dict(torch.load(model_file, map_location=train_model.device)) + # link model to torch device + train_model.net.to(train_model.device) # Convert videos to images for train input pre_selected_frame_ind = (FacemapModelTrainingTask & key).fetch1('selected_frame_ind') - # Currently, only support single video training assert len(video_files) == 1 @@ -292,6 +334,8 @@ def make(self, key): selected_frame_ind = np.arange(int(cap.get(cv2.CAP_PROP_FRAME_COUNT))) else: selected_frame_ind = pre_selected_frame_ind + + # Load image frames from video image_data = utils.load_images_from_video(video_file, selected_frame_ind) # MULTIVIDEO TODO @@ -313,19 +357,17 @@ def make(self, key): # Can use existing keypoints data stored facemap_pose schema - # keypoints_data = (facemap_pose.FacemapPoseEstimation.BodyPartPosition).fetch(as_dict=True) - - keypoints_file = (FacemapModelTrainingTask & key).fetch('keypoints_filename') + keypoints_file = # This works, but we would need to store Files in the facial pose model as well, keypoints_data = utils.load_keypoints(facemap_pose.BodyPart.contents, keypoints_file) # Model Parameters (fetch from TrainingParamSet as dict) - training_params = (FacemapTrainParamSet & f'paramset_idx={paramset_idx}').fetch1('params') + training_params = (FacemapTrainParamSet & f'paramset_idx={key["paramset_idx"]}').fetch1('params') refined_model_name = (FacemapModelTrainingTask & key).fetch1('refined_model_name') # default = "refined_model" # Train model using train function defined in Pose class - train_model.net = train_model.train(image_data[:,:,:,0], + train_model.net = train_model.train(image_data[:,:,:,0], # note: using 0 index for now (could average across this dimension) keypoints_data.T, # needs to be transposed int(training_params['epochs']), int(training_params['batch_size']), @@ -361,7 +403,7 @@ def make(self, key): model_output_path = output_dir / f'{refined_model_name}.pth' torch.save(train_model.net.state_dict(), model_output_path) - model_id = (FacemapModelTrainingTask & key).fetch1('model_id') + model_id = key['model_id'] model_description = (FacemapModelTrainingTask & 
key).fetch1('model_description') # Insert newly trained model results into FacemapModel table @@ -372,13 +414,10 @@ def make(self, key): except ValueError: # case that nothing has been inserted model_id = 0 - model_insert = dict(model_id=model_id, - model_name=refined_model_name, - model_description=model_description) - model_file_insert = dict(model_id=model_id, model_file=model_output_path) - - facemap_pose.FacemapModel.insert_new_model(model_insert) - facemap_pose.FacemapModel.File + facemap_pose.FacemapModel().insert_new_model(model_id, + refined_model_name, + model_description, + model_output_path) train_model_time = datetime.fromtimestamp(model_output_path.stat().st_mtime).strftime( "%Y-%m-%d %H:%M:%S" From a6bc4f1227bd5f668b2c0a89504006ce93f79aac Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Tue, 3 Oct 2023 20:39:47 -0500 Subject: [PATCH 084/182] modify keypoints file loading from fileset --- element_facemap/train_facial_model.py | 45 +++++++++++---------------- 1 file changed, 18 insertions(+), 27 deletions(-) diff --git a/element_facemap/train_facial_model.py b/element_facemap/train_facial_model.py index 4c8684b..5d4abdb 100644 --- a/element_facemap/train_facial_model.py +++ b/element_facemap/train_facial_model.py @@ -122,7 +122,7 @@ class VideoFile(dj.Part): """ class KeypointsFile(dj.Part): - """Non video File IDs and paths in a given FacemapTrainFileSet + """Keypoints File containing labels and paths in a given FacemapTrainFileSet Attributes: FacemapTrainFileSet (foreign key) : FacemapTrainFileSet key. @@ -282,7 +282,6 @@ class FacemapModelTraining(dj.Computed): def make(self, key): from facemap.pose import pose as facemap_pose from facemap import utils - import cv2 import torch train_output_dir = (FacemapModelTrainingTask & key).fetch1('train_output_dir') @@ -301,18 +300,15 @@ def make(self, key): keypoints_file = [f for f in train_fileset if keypoints_file_name in f] if len(keypoints_file) > 0: - keypoints_file = keypoints_file[0] # if multiple keypoints files are specified select first file + keypoints_file = keypoints_file[0] # if multiple keypoints files are specified, select first file - h5_filepaths = [f for f in train_fileset if f.endswith('.h5')] - - retrain_model_id = key['retrain_model_id'] # Create a pose model object, specifying the video files train_model = facemap_pose.Pose(filename=[video_files]) train_model.pose_prediction_setup() # Sets default facemap model as train_model.net, handles empty bbox - if len(retrain_model_id) > 0: # Retrain an existing model from the facemap_pose.FacemapModel table + if len(key['retrain_model_id']) > 0: # Retrain an existing model from the facemap_pose.FacemapModel table # Fetch model file attachment so that model_file (.pth) is availible in Path.cwd() - model_file = (facemap_pose.FacemapModel.File & {'model_id': retrain_model_id}).fetch1("model_file") + model_file = (facemap_pose.FacemapModel.File & {'model_id': key['retrain_model_id']}).fetch1("model_file") # Set train_model object to load preexisting model train_model.model_name = model_file @@ -329,7 +325,7 @@ def make(self, key): video_file = video_files[0] if len(pre_selected_frame_ind) == 0: # set selected frames to all frames - + import cv2 cap = cv2.VideoCapture(video_file) selected_frame_ind = np.arange(int(cap.get(cv2.CAP_PROP_FRAME_COUNT))) else: @@ -338,28 +334,23 @@ def make(self, key): # Load image frames from video image_data = utils.load_images_from_video(video_file, selected_frame_ind) - # MULTIVIDEO TODO - # image_data = [] - # for video_file in 
video_files: - # if len(pre_selected_frame_ind) == 0: # set selected frames to all frames + # MULTIVIDEO TODO + # image_data = [] + # for video_file in video_files: + # if len(pre_selected_frame_ind) == 0: # set selected frames to all frames - # cap = cv2.VideoCapture(video_file) - # selected_frame_ind = np.arange(int(cap.get(cv2.CAP_PROP_FRAME_COUNT))) - # else: - # selected_frame_ind = pre_selected_frame_ind + # cap = cv2.VideoCapture(video_file) + # selected_frame_ind = np.arange(int(cap.get(cv2.CAP_PROP_FRAME_COUNT))) + # else: + # selected_frame_ind = pre_selected_frame_ind - # image_data.append(utils.load_images_from_video(video_file, selected_frame_ind)) + # image_data.append(utils.load_images_from_video(video_file, selected_frame_ind)) - # -- For multivideo image data reshaping - # cumframes, Ly, Lx, containers = utils.get_frame_details(video_files) - # LY, LX, sy, sx = utils.video_placement(Ly, Lx) - # reshaped_videos = utils.multivideo_reshape(image_data, LY, LX, Ly, Lx, sy, sx) - - - # Can use existing keypoints data stored facemap_pose schema - keypoints_file = + # -- For multivideo image data reshaping + # cumframes, Ly, Lx, containers = utils.get_frame_details(video_files) + # LY, LX, sy, sx = utils.video_placement(Ly, Lx) + # reshaped_videos = utils.multivideo_reshape(image_data, LY, LX, Ly, Lx, sy, sx) - # This works, but we would need to store Files in the facial pose model as well, keypoints_data = utils.load_keypoints(facemap_pose.BodyPart.contents, keypoints_file) # Model Parameters (fetch from TrainingParamSet as dict) From 89f6a16f72b10175a0e860a13885b23176d65d0f Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Tue, 3 Oct 2023 21:03:28 -0500 Subject: [PATCH 085/182] modify insert_facemap_training task class function --- element_facemap/train_facial_model.py | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/element_facemap/train_facial_model.py b/element_facemap/train_facial_model.py index 5d4abdb..552bf98 100644 --- a/element_facemap/train_facial_model.py +++ b/element_facemap/train_facial_model.py @@ -243,23 +243,22 @@ def insert_facemap_training_task(cls, file_set_key, training_task_id, paramset_idx, - refined_model_name, - model_description, - selected_frame_ind, + refined_model_name='refined_model', + model_description=None, + selected_frame_ind=None, + keypoints_filename="", model_id=None, retrain_model_id=None): key = {**file_set_key, "paramset_idx": paramset_idx} inferred_output_dir = cls.infer_output_dir(key, relative=True, mkdir=True) facemap_training_task_insert = dict(**key, training_task_id=training_task_id, - train_output_dir=inferred_output_dir, + train_output_dir=inferred_output_dir.as_posix(), refined_model_name=refined_model_name, selected_frame_ind=selected_frame_ind, model_description=model_description, model_id=model_id, retrain_model_id=retrain_model_id) - - facemap_training_task_insert.update({'train_output_dir': inferred_output_dir.as_posix()}) cls.insert1(facemap_training_task_insert) @schema From 793caa7b6dc5bd9e94d688c215d61b11a6137987 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Tue, 3 Oct 2023 21:05:20 -0500 Subject: [PATCH 086/182] modify facemap training task insert --- element_facemap/train_facial_model.py | 1 + 1 file changed, 1 insertion(+) diff --git a/element_facemap/train_facial_model.py b/element_facemap/train_facial_model.py index 552bf98..a2495dd 100644 --- a/element_facemap/train_facial_model.py +++ b/element_facemap/train_facial_model.py @@ -257,6 +257,7 @@ def 
insert_facemap_training_task(cls, refined_model_name=refined_model_name, selected_frame_ind=selected_frame_ind, model_description=model_description, + keypoints_filename=keypoints_filename, model_id=model_id, retrain_model_id=retrain_model_id) cls.insert1(facemap_training_task_insert) From 088f5d3c912a7c846a2cb575f517e459a9f6db40 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Tue, 3 Oct 2023 21:08:26 -0500 Subject: [PATCH 087/182] modify training task insertion key --- element_facemap/train_facial_model.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/element_facemap/train_facial_model.py b/element_facemap/train_facial_model.py index a2495dd..61aa4e8 100644 --- a/element_facemap/train_facial_model.py +++ b/element_facemap/train_facial_model.py @@ -240,7 +240,7 @@ def infer_output_dir(self, key, relative=True, mkdir=True): @classmethod def insert_facemap_training_task(cls, - file_set_key, + file_set_id, training_task_id, paramset_idx, refined_model_name='refined_model', @@ -249,7 +249,7 @@ def insert_facemap_training_task(cls, keypoints_filename="", model_id=None, retrain_model_id=None): - key = {**file_set_key, "paramset_idx": paramset_idx} + key = {"file_set_id": file_set_id, "paramset_idx": paramset_idx} inferred_output_dir = cls.infer_output_dir(key, relative=True, mkdir=True) facemap_training_task_insert = dict(**key, training_task_id=training_task_id, From a36d9614afc5c34b65a5133414efbeb4dcb6a3f5 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Tue, 3 Oct 2023 21:19:12 -0500 Subject: [PATCH 088/182] call infer output dir as class method --- element_facemap/train_facial_model.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/element_facemap/train_facial_model.py b/element_facemap/train_facial_model.py index 61aa4e8..42831f2 100644 --- a/element_facemap/train_facial_model.py +++ b/element_facemap/train_facial_model.py @@ -250,7 +250,7 @@ def insert_facemap_training_task(cls, model_id=None, retrain_model_id=None): key = {"file_set_id": file_set_id, "paramset_idx": paramset_idx} - inferred_output_dir = cls.infer_output_dir(key, relative=True, mkdir=True) + inferred_output_dir = cls().infer_output_dir(key, relative=True, mkdir=True) facemap_training_task_insert = dict(**key, training_task_id=training_task_id, train_output_dir=inferred_output_dir.as_posix(), From 5389445f6918cf71c41a983ec785b6b955c97e7f Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Tue, 3 Oct 2023 21:21:10 -0500 Subject: [PATCH 089/182] fix rename typo --- element_facemap/train_facial_model.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/element_facemap/train_facial_model.py b/element_facemap/train_facial_model.py index 42831f2..7b24195 100644 --- a/element_facemap/train_facial_model.py +++ b/element_facemap/train_facial_model.py @@ -224,7 +224,7 @@ class FacemapModelTrainingTask(dj.Manual): keypoints_filename : varchar(64) # Specify keypoints filename if multiple keypoints files are stored """ def infer_output_dir(self, key, relative=True, mkdir=True): - video_file = (FacemapTrainFileSet.File & key).fetch("file_path", limit=1)[0] + video_file = (FacemapTrainFileSet.VideoFile & key).fetch("file_path", limit=1)[0] video_dir = find_full_path(fbe.get_facemap_root_data_dir(), video_file).parent root_dir = find_root_directory(fbe.get_facemap_root_data_dir(), video_dir) From 6f75740198035a59c930f790ea0acec9222245bb Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Tue, 3 Oct 2023 21:23:03 -0500 Subject: [PATCH 090/182] fix typo --- 
element_facemap/train_facial_model.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/element_facemap/train_facial_model.py b/element_facemap/train_facial_model.py index 7b24195..2b10285 100644 --- a/element_facemap/train_facial_model.py +++ b/element_facemap/train_facial_model.py @@ -224,7 +224,7 @@ class FacemapModelTrainingTask(dj.Manual): keypoints_filename : varchar(64) # Specify keypoints filename if multiple keypoints files are stored """ def infer_output_dir(self, key, relative=True, mkdir=True): - video_file = (FacemapTrainFileSet.VideoFile & key).fetch("file_path", limit=1)[0] + video_file = (FacemapTrainFileSet.VideoFile & key).fetch("video_file_path", limit=1)[0] video_dir = find_full_path(fbe.get_facemap_root_data_dir(), video_file).parent root_dir = find_root_directory(fbe.get_facemap_root_data_dir(), video_dir) From d85c4a21e0d91a13cda7b0ae2a485bc7e77c5adf Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Tue, 3 Oct 2023 21:29:34 -0500 Subject: [PATCH 091/182] set training task attribute values to None --- element_facemap/train_facial_model.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/element_facemap/train_facial_model.py b/element_facemap/train_facial_model.py index 2b10285..21423d5 100644 --- a/element_facemap/train_facial_model.py +++ b/element_facemap/train_facial_model.py @@ -216,12 +216,12 @@ class FacemapModelTrainingTask(dj.Manual): training_task_id : smallint --- train_output_dir : varchar(255) # Trained model output directory - refined_model_name='refined_model' : varchar(32) - model_id : smallint # Model index for insertion into FacemapModel table - retrain_model_id : smallint # Model index of model to be loaded for retraining - model_description : varchar(255) # Optional, model desc for insertion into FacemapModel - selected_frame_ind : blob # Array of frames to run training on - keypoints_filename : varchar(64) # Specify keypoints filename if multiple keypoints files are stored + refined_model_name='refined_model' : varchar(32) # Specify name of finetuned/trained model filepath + model_id=None : smallint # Model index for insertion into FacemapModel table + retrain_model_id=None : smallint # Model index of model to be loaded for retraining + model_description=None : varchar(255) # Optional, model desc for insertion into FacemapModel + selected_frame_ind=None : blob # Array of frames to run training on + keypoints_filename=None : varchar(64) # Specify keypoints filename if multiple keypoints files are stored """ def infer_output_dir(self, key, relative=True, mkdir=True): video_file = (FacemapTrainFileSet.VideoFile & key).fetch("video_file_path", limit=1)[0] @@ -306,10 +306,10 @@ def make(self, key): train_model = facemap_pose.Pose(filename=[video_files]) train_model.pose_prediction_setup() # Sets default facemap model as train_model.net, handles empty bbox - if len(key['retrain_model_id']) > 0: # Retrain an existing model from the facemap_pose.FacemapModel table + if len((FacemapModelTrainingTask & key).fetch1('refined_model_id')) > 0: # Retrain an existing model from the facemap_pose.FacemapModel table + # Fetch model file attachment so that model_file (.pth) is availible in Path.cwd() model_file = (facemap_pose.FacemapModel.File & {'model_id': key['retrain_model_id']}).fetch1("model_file") - # Set train_model object to load preexisting model train_model.model_name = model_file # Overwrite default train_model.net From fa04fa448f45f9d944248abfd476d5e7ca948549 Mon Sep 17 00:00:00 2001 From: Sidharth 
Hulyalkar Date: Tue, 3 Oct 2023 21:34:48 -0500 Subject: [PATCH 092/182] update FacemapModelTrainingTask comment --- element_facemap/train_facial_model.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/element_facemap/train_facial_model.py b/element_facemap/train_facial_model.py index 21423d5..f0ad7b0 100644 --- a/element_facemap/train_facial_model.py +++ b/element_facemap/train_facial_model.py @@ -207,6 +207,10 @@ class FacemapModelTrainingTask(dj.Manual): train_output_dir( varchar(255) ): Relative output directory for trained model refined_model_name ( varchar(32) ): Name for retrained model model_id (smallint): Unique Model index to be inserted into FacemapModel table + retrain_model_id (smallint): Model index to query FacemapModel table to link model.net + model_description ( varchar(255) ): Optional. Model Description for insertion into FacemapModel + selected_frame_ind (blob) : Array of frames to run training on, if not specified all frames used. + keypoints_filename ( varchar(64) ): Optional. Name of specific keypoints file if multiple """ @@ -310,10 +314,13 @@ def make(self, key): # Fetch model file attachment so that model_file (.pth) is availible in Path.cwd() model_file = (facemap_pose.FacemapModel.File & {'model_id': key['retrain_model_id']}).fetch1("model_file") + # Set train_model object to load preexisting model train_model.model_name = model_file + # Overwrite default train_model.net train_model.net.load_state_dict(torch.load(model_file, map_location=train_model.device)) + # link model to torch device train_model.net.to(train_model.device) From f2ee0eaa72ec7eac5d2401e5e7fc853e33c0bbdd Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Tue, 3 Oct 2023 21:38:58 -0500 Subject: [PATCH 093/182] change blob from none type --- element_facemap/train_facial_model.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/element_facemap/train_facial_model.py b/element_facemap/train_facial_model.py index f0ad7b0..6db930e 100644 --- a/element_facemap/train_facial_model.py +++ b/element_facemap/train_facial_model.py @@ -220,11 +220,11 @@ class FacemapModelTrainingTask(dj.Manual): training_task_id : smallint --- train_output_dir : varchar(255) # Trained model output directory + selected_frame_ind : blob # Array of frames to run training on refined_model_name='refined_model' : varchar(32) # Specify name of finetuned/trained model filepath model_id=None : smallint # Model index for insertion into FacemapModel table retrain_model_id=None : smallint # Model index of model to be loaded for retraining model_description=None : varchar(255) # Optional, model desc for insertion into FacemapModel - selected_frame_ind=None : blob # Array of frames to run training on keypoints_filename=None : varchar(64) # Specify keypoints filename if multiple keypoints files are stored """ def infer_output_dir(self, key, relative=True, mkdir=True): @@ -249,7 +249,7 @@ def insert_facemap_training_task(cls, paramset_idx, refined_model_name='refined_model', model_description=None, - selected_frame_ind=None, + selected_frame_ind=[], keypoints_filename="", model_id=None, retrain_model_id=None): From 3727884136b69f5fb231c599ab9da5fc352cda1e Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Tue, 3 Oct 2023 21:42:22 -0500 Subject: [PATCH 094/182] set smallints to null default value --- element_facemap/train_facial_model.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/element_facemap/train_facial_model.py b/element_facemap/train_facial_model.py index 
6db930e..4a40ebf 100644 --- a/element_facemap/train_facial_model.py +++ b/element_facemap/train_facial_model.py @@ -220,10 +220,10 @@ class FacemapModelTrainingTask(dj.Manual): training_task_id : smallint --- train_output_dir : varchar(255) # Trained model output directory - selected_frame_ind : blob # Array of frames to run training on refined_model_name='refined_model' : varchar(32) # Specify name of finetuned/trained model filepath - model_id=None : smallint # Model index for insertion into FacemapModel table - retrain_model_id=None : smallint # Model index of model to be loaded for retraining + model_id=null : smallint # Model index for insertion into FacemapModel table + retrain_model_id=null : smallint # Model index of model to be loaded for retraining + selected_frame_ind=null : blob # Array of frames to run training on model_description=None : varchar(255) # Optional, model desc for insertion into FacemapModel keypoints_filename=None : varchar(64) # Specify keypoints filename if multiple keypoints files are stored """ From a2df523a433a94d2c8bb7d2b1bf82afbdefd4175 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Tue, 3 Oct 2023 21:50:52 -0500 Subject: [PATCH 095/182] update renamed variable file_set_id in FacemapModelTraining --- element_facemap/train_facial_model.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/element_facemap/train_facial_model.py b/element_facemap/train_facial_model.py index 4a40ebf..663b88c 100644 --- a/element_facemap/train_facial_model.py +++ b/element_facemap/train_facial_model.py @@ -293,12 +293,12 @@ def make(self, key): video_files = [find_full_path(fbe.get_facemap_root_data_dir(), fp).as_posix() for fp in (FacemapTrainFileSet.VideoFile & - {'video_set_id': key['video_set_id']}).fetch("file_path")] + {'file_set_id': key['file_set_id']}).fetch("file_path")] # manually specified .h5 keypoints file train_fileset = [find_full_path(fbe.get_facemap_root_data_dir(), fp).as_posix() for fp in (FacemapTrainFileSet.File & - {'file_set_id': key['video_set_id']}).fetch("file_path")] + {'file_set_id': key['file_set_id']}).fetch("file_path")] keypoints_file_name = (FacemapModelTrainingTask & key).fetch1("keypoints_filename") From db2a90e4167acf27304a20fa9a2823953e3e6d39 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Tue, 3 Oct 2023 21:52:58 -0500 Subject: [PATCH 096/182] fix typo --- element_facemap/train_facial_model.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/element_facemap/train_facial_model.py b/element_facemap/train_facial_model.py index 663b88c..e1ae196 100644 --- a/element_facemap/train_facial_model.py +++ b/element_facemap/train_facial_model.py @@ -293,7 +293,7 @@ def make(self, key): video_files = [find_full_path(fbe.get_facemap_root_data_dir(), fp).as_posix() for fp in (FacemapTrainFileSet.VideoFile & - {'file_set_id': key['file_set_id']}).fetch("file_path")] + {'file_set_id': key['file_set_id']}).fetch("video_file_path")] # manually specified .h5 keypoints file train_fileset = [find_full_path(fbe.get_facemap_root_data_dir(), fp).as_posix() From 952e01ee301ea7c3009d760cff6149d400cafa75 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Tue, 3 Oct 2023 21:55:08 -0500 Subject: [PATCH 097/182] change File to KeypointsFile in ref --- element_facemap/train_facial_model.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/element_facemap/train_facial_model.py b/element_facemap/train_facial_model.py index e1ae196..e673e4d 100644 --- a/element_facemap/train_facial_model.py +++ 
b/element_facemap/train_facial_model.py @@ -296,13 +296,13 @@ def make(self, key): {'file_set_id': key['file_set_id']}).fetch("video_file_path")] # manually specified .h5 keypoints file - train_fileset = [find_full_path(fbe.get_facemap_root_data_dir(), fp).as_posix() - for fp in (FacemapTrainFileSet.File & + keypoints_fileset = [find_full_path(fbe.get_facemap_root_data_dir(), fp).as_posix() + for fp in (FacemapTrainFileSet.KeypointsFile & {'file_set_id': key['file_set_id']}).fetch("file_path")] keypoints_file_name = (FacemapModelTrainingTask & key).fetch1("keypoints_filename") - keypoints_file = [f for f in train_fileset if keypoints_file_name in f] + keypoints_file = [f for f in keypoints_fileset if keypoints_file_name in f] if len(keypoints_file) > 0: keypoints_file = keypoints_file[0] # if multiple keypoints files are specified, select first file From 7d38b518dea5cffb00624978bfe48375e113e2c9 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Wed, 4 Oct 2023 15:43:33 -0500 Subject: [PATCH 098/182] update facemap pose object to be named pose, and facemap_pose will refer to imported schema --- element_facemap/train_facial_model.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/element_facemap/train_facial_model.py b/element_facemap/train_facial_model.py index e673e4d..c1cdcae 100644 --- a/element_facemap/train_facial_model.py +++ b/element_facemap/train_facial_model.py @@ -284,7 +284,7 @@ class FacemapModelTraining(dj.Computed): """ def make(self, key): - from facemap.pose import pose as facemap_pose + from facemap.pose import pose from facemap import utils import torch @@ -307,7 +307,7 @@ def make(self, key): keypoints_file = keypoints_file[0] # if multiple keypoints files are specified, select first file # Create a pose model object, specifying the video files - train_model = facemap_pose.Pose(filename=[video_files]) + train_model = pose.Pose(filename=[video_files]) train_model.pose_prediction_setup() # Sets default facemap model as train_model.net, handles empty bbox if len((FacemapModelTrainingTask & key).fetch1('refined_model_id')) > 0: # Retrain an existing model from the facemap_pose.FacemapModel table From 860ffe935e4c749888f171dba143b8f3ab5a5025 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Wed, 4 Oct 2023 15:45:23 -0500 Subject: [PATCH 099/182] fix typo --- element_facemap/train_facial_model.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/element_facemap/train_facial_model.py b/element_facemap/train_facial_model.py index c1cdcae..c20dc10 100644 --- a/element_facemap/train_facial_model.py +++ b/element_facemap/train_facial_model.py @@ -307,7 +307,7 @@ def make(self, key): keypoints_file = keypoints_file[0] # if multiple keypoints files are specified, select first file # Create a pose model object, specifying the video files - train_model = pose.Pose(filename=[video_files]) + train_model = pose.Pose(filenames=[video_files]) train_model.pose_prediction_setup() # Sets default facemap model as train_model.net, handles empty bbox if len((FacemapModelTrainingTask & key).fetch1('refined_model_id')) > 0: # Retrain an existing model from the facemap_pose.FacemapModel table From e574b0a2bc4b9be403276779c1a1dcc20cee7fe6 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Wed, 4 Oct 2023 17:11:49 -0500 Subject: [PATCH 100/182] rename refined_model_id to retrain_model_id --- element_facemap/train_facial_model.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/element_facemap/train_facial_model.py 
b/element_facemap/train_facial_model.py index c20dc10..26d1f56 100644 --- a/element_facemap/train_facial_model.py +++ b/element_facemap/train_facial_model.py @@ -309,8 +309,9 @@ def make(self, key): # Create a pose model object, specifying the video files train_model = pose.Pose(filenames=[video_files]) train_model.pose_prediction_setup() # Sets default facemap model as train_model.net, handles empty bbox + retrain_model_id = (FacemapModelTrainingTask & key).fetch1('retrain_model_id') - if len((FacemapModelTrainingTask & key).fetch1('refined_model_id')) > 0: # Retrain an existing model from the facemap_pose.FacemapModel table + if retrain_model_id is not None: # Retrain an existing model from the facemap_pose.FacemapModel table # Fetch model file attachment so that model_file (.pth) is availible in Path.cwd() model_file = (facemap_pose.FacemapModel.File & {'model_id': key['retrain_model_id']}).fetch1("model_file") From 141d3017f21df1edfa1ae50e45d338019d0ddcee Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Thu, 5 Oct 2023 11:43:30 -0500 Subject: [PATCH 101/182] change model_id fetch --- element_facemap/train_facial_model.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/element_facemap/train_facial_model.py b/element_facemap/train_facial_model.py index 26d1f56..e85158b 100644 --- a/element_facemap/train_facial_model.py +++ b/element_facemap/train_facial_model.py @@ -402,7 +402,7 @@ def make(self, key): model_output_path = output_dir / f'{refined_model_name}.pth' torch.save(train_model.net.state_dict(), model_output_path) - model_id = key['model_id'] + model_id = (FacemapModelTrainingTask & key).fetch1('model_id') model_description = (FacemapModelTrainingTask & key).fetch1('model_description') # Insert newly trained model results into FacemapModel table From 24e2f3b4ba851246866c8a0ea8f87dc92c807ba6 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Thu, 5 Oct 2023 11:47:39 -0500 Subject: [PATCH 102/182] fix model id none fetch --- element_facemap/train_facial_model.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/element_facemap/train_facial_model.py b/element_facemap/train_facial_model.py index e85158b..14a0b02 100644 --- a/element_facemap/train_facial_model.py +++ b/element_facemap/train_facial_model.py @@ -408,7 +408,7 @@ def make(self, key): # Insert newly trained model results into FacemapModel table try: model_ids = facemap_pose.FacemapModel.fetch("model_id") - if len(model_id) == 0 or model_id in model_ids: + if model_id is None or model_id in model_ids: model_id = max(model_ids) + 1 except ValueError: # case that nothing has been inserted model_id = 0 From 8e45d6c6d2e0211436e98348b7dff7fa3819e45e Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Fri, 6 Oct 2023 15:29:16 -0500 Subject: [PATCH 103/182] store model reference in model training table --- element_facemap/train_facial_model.py | 33 ++++++++++++++++++++++----- 1 file changed, 27 insertions(+), 6 deletions(-) diff --git a/element_facemap/train_facial_model.py b/element_facemap/train_facial_model.py index 14a0b02..2793018 100644 --- a/element_facemap/train_facial_model.py +++ b/element_facemap/train_facial_model.py @@ -268,7 +268,7 @@ def insert_facemap_training_task(cls, @schema class FacemapModelTraining(dj.Computed): - """Automated Model training information. + """Automated Model training Attributes: FacemapModelTrainingTask (foreign key): FacemapModelTrainingTask key. 
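The commits around this hunk (change model_id fetch, fix model id none fetch, store model reference) keep adjusting how the freshly trained model is registered in facemap_pose.FacemapModel: fetch the existing model_id values, auto-increment past them when no id was given or the given id collides, fall back to 0 for an empty table, then call insert_new_model. Below is a minimal consolidated sketch of that step, assuming the FacemapModel table from the pose schema; the helper name _next_model_id is illustrative only, while fetch and insert_new_model are the calls already used in these diffs.

    def _next_model_id(requested_id=None):
        # Illustrative helper, not part of the element; it consolidates the id
        # logic iterated on in these commits. Reuse the requested id when it is
        # free, otherwise take max(existing) + 1; an empty table (max() over an
        # empty fetch result raises ValueError) yields id 0.
        try:
            existing_ids = facemap_pose.FacemapModel.fetch("model_id")
            if requested_id is None or requested_id in existing_ids:
                return max(existing_ids) + 1
            return requested_id
        except ValueError:  # nothing inserted yet
            return 0

    model_id = _next_model_id((FacemapModelTrainingTask & key).fetch1("model_id"))
    facemap_pose.FacemapModel().insert_new_model(
        model_id, refined_model_name, model_description, model_output_path
    )

The same model_id is what the hunk below stores as facemap_model_reference in FacemapModelTraining.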
@@ -280,7 +280,7 @@ class FacemapModelTraining(dj.Computed): -> FacemapModelTrainingTask --- train_model_time : datetime # Time of creation of train model file - train_model : longblob # Dictionary containing model.net state + facemap_model_reference : smallint # Reference to index facemap_pose.FacemapModel table """ def make(self, key): @@ -365,7 +365,7 @@ def make(self, key): training_params = (FacemapTrainParamSet & f'paramset_idx={key["paramset_idx"]}').fetch1('params') refined_model_name = (FacemapModelTrainingTask & key).fetch1('refined_model_name') # default = "refined_model" - # Train model using train function defined in Pose class + # # Train model using train function defined in Pose class train_model.net = train_model.train(image_data[:,:,:,0], # note: using 0 index for now (could average across this dimension) keypoints_data.T, # needs to be transposed int(training_params['epochs']), @@ -376,7 +376,27 @@ def make(self, key): # Alternate (requires more imports, but allows for access to model_training object that can be used for cross validation) - # from facemap.pose import model_training, datasets + from facemap.pose import model_training, datasets + + + # Split dataset into train and test splits + + # # Splitting keypoints data + # dsplits = utils.split_data(X,Y,tcam,tneural) + # ( + # X_train, + # X_test, + # Y_train, + # Y_test, + # itrain_sample_b, + # itest_sample_b, + # itrain_sample, + # itest_sample, + # itrain, + # itest, + # ) = dsplits + + # # Splitting frames image data # dataset = datasets.FacemapDataset( # image_data=image_data, @@ -423,5 +443,6 @@ def make(self, key): ) self.insert1( - {**key, 'train_model_time': train_model_time, 'train_model': train_model.net.state_dict()} - ) + {**key, 'train_model_time': train_model_time, 'facemap_model_reference': model_id} + + ) \ No newline at end of file From 8ef54441c865ca5f6369876da40e304f620536ff Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Fri, 6 Oct 2023 15:34:22 -0500 Subject: [PATCH 104/182] bugfix --- element_facemap/train_facial_model.py | 68 +++++++++++++-------------- 1 file changed, 34 insertions(+), 34 deletions(-) diff --git a/element_facemap/train_facial_model.py b/element_facemap/train_facial_model.py index 2793018..9292e03 100644 --- a/element_facemap/train_facial_model.py +++ b/element_facemap/train_facial_model.py @@ -314,7 +314,7 @@ def make(self, key): if retrain_model_id is not None: # Retrain an existing model from the facemap_pose.FacemapModel table # Fetch model file attachment so that model_file (.pth) is availible in Path.cwd() - model_file = (facemap_pose.FacemapModel.File & {'model_id': key['retrain_model_id']}).fetch1("model_file") + model_file = (facemap_pose.FacemapModel.File & {'model_id': retrain_model_id}).fetch1("model_file") # Set train_model object to load preexisting model train_model.model_name = model_file @@ -381,39 +381,39 @@ def make(self, key): # Split dataset into train and test splits - # # Splitting keypoints data - # dsplits = utils.split_data(X,Y,tcam,tneural) - # ( - # X_train, - # X_test, - # Y_train, - # Y_test, - # itrain_sample_b, - # itest_sample_b, - # itrain_sample, - # itest_sample, - # itrain, - # itest, - # ) = dsplits - - # # Splitting frames image data - - # dataset = datasets.FacemapDataset( - # image_data=image_data, - # keypoints_data=keypoints_data.T, - # bbox=training_params['bbox'], - # ) - # # Create a dataloader object for training - # dataloader = torch.utils.data.DataLoader( - # dataset, batch_size=int(training_params['batch_size']), 
shuffle=True - # ) - # # Use preprocessed data to train the model - # train_model.net = model_training.train( - # dataloader, - # train_model.net, - # int(training_params['epochs']), - # int(training_params['weight_decay']), - # ) + # Splitting keypoints data + dsplits = utils.split_data(X,Y,tcam,tneural) + ( + X_train, + X_test, + Y_train, + Y_test, + itrain_sample_b, + itest_sample_b, + itrain_sample, + itest_sample, + itrain, + itest, + ) = dsplits + + # Splitting frames image data + + dataset = datasets.FacemapDataset( + image_data=image_data, + keypoints_data=keypoints_data.T, + bbox=training_params['bbox'], + ) + # Create a dataloader object for training + dataloader = torch.utils.data.DataLoader( + dataset, batch_size=int(training_params['batch_size']), shuffle=True + ) + # Use preprocessed data to train the model + train_model.net = model_training.train( + dataloader, + train_model.net, + int(training_params['epochs']), + int(training_params['weight_decay']), + ) # pred_keypoints, keypoints = model_training.get_test_predictions(train_model.net, test_dataset) From 7a9041d6065ca9dd114b1e13ac00ca83b816eba4 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Fri, 6 Oct 2023 15:38:33 -0500 Subject: [PATCH 105/182] comment out alternate approach --- element_facemap/train_facial_model.py | 64 +++++++++++++-------------- 1 file changed, 32 insertions(+), 32 deletions(-) diff --git a/element_facemap/train_facial_model.py b/element_facemap/train_facial_model.py index 9292e03..b175807 100644 --- a/element_facemap/train_facial_model.py +++ b/element_facemap/train_facial_model.py @@ -382,38 +382,38 @@ def make(self, key): # Split dataset into train and test splits # Splitting keypoints data - dsplits = utils.split_data(X,Y,tcam,tneural) - ( - X_train, - X_test, - Y_train, - Y_test, - itrain_sample_b, - itest_sample_b, - itrain_sample, - itest_sample, - itrain, - itest, - ) = dsplits - - # Splitting frames image data - - dataset = datasets.FacemapDataset( - image_data=image_data, - keypoints_data=keypoints_data.T, - bbox=training_params['bbox'], - ) - # Create a dataloader object for training - dataloader = torch.utils.data.DataLoader( - dataset, batch_size=int(training_params['batch_size']), shuffle=True - ) - # Use preprocessed data to train the model - train_model.net = model_training.train( - dataloader, - train_model.net, - int(training_params['epochs']), - int(training_params['weight_decay']), - ) + # dsplits = utils.split_data(X,Y,tcam,tneural) + # ( + # X_train, + # X_test, + # Y_train, + # Y_test, + # itrain_sample_b, + # itest_sample_b, + # itrain_sample, + # itest_sample, + # itrain, + # itest, + # ) = dsplits + + # # Splitting frames image data + + # dataset = datasets.FacemapDataset( + # image_data=image_data, + # keypoints_data=keypoints_data.T, + # bbox=training_params['bbox'], + # ) + # # Create a dataloader object for training + # dataloader = torch.utils.data.DataLoader( + # dataset, batch_size=int(training_params['batch_size']), shuffle=True + # ) + # # Use preprocessed data to train the model + # train_model.net = model_training.train( + # dataloader, + # train_model.net, + # int(training_params['epochs']), + # int(training_params['weight_decay']), + # ) # pred_keypoints, keypoints = model_training.get_test_predictions(train_model.net, test_dataset) From 29c2b59607b1d20d6e4ce3239e6aadcdf3e1e8bf Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Tue, 10 Oct 2023 14:00:53 -0500 Subject: [PATCH 106/182] modify to use save_model --- element_facemap/train_facial_model.py 
| 49 +++++++++++++++------------ 1 file changed, 27 insertions(+), 22 deletions(-) diff --git a/element_facemap/train_facial_model.py b/element_facemap/train_facial_model.py index b175807..a9747ac 100644 --- a/element_facemap/train_facial_model.py +++ b/element_facemap/train_facial_model.py @@ -222,6 +222,7 @@ class FacemapModelTrainingTask(dj.Manual): train_output_dir : varchar(255) # Trained model output directory refined_model_name='refined_model' : varchar(32) # Specify name of finetuned/trained model filepath model_id=null : smallint # Model index for insertion into FacemapModel table + retrain_model_id=null : smallint # Model index of model to be loaded for retraining selected_frame_ind=null : blob # Array of frames to run training on model_description=None : varchar(255) # Optional, model desc for insertion into FacemapModel @@ -365,7 +366,7 @@ def make(self, key): training_params = (FacemapTrainParamSet & f'paramset_idx={key["paramset_idx"]}').fetch1('params') refined_model_name = (FacemapModelTrainingTask & key).fetch1('refined_model_name') # default = "refined_model" - # # Train model using train function defined in Pose class + # Train model using train function defined in Pose class train_model.net = train_model.train(image_data[:,:,:,0], # note: using 0 index for now (could average across this dimension) keypoints_data.T, # needs to be transposed int(training_params['epochs']), @@ -373,9 +374,13 @@ def make(self, key): float(training_params['learning_rate']), int(training_params['weight_decay']), bbox=training_params['bbox']) + testing_video_id = + if testing_video_id is not None: + train_model.predict_landmarks(testing_video_id, frame_ind=selected_frame_ind) - - # Alternate (requires more imports, but allows for access to model_training object that can be used for cross validation) + + + # Model Training with Cross Validation from facemap.pose import model_training, datasets @@ -398,29 +403,29 @@ def make(self, key): # # Splitting frames image data - # dataset = datasets.FacemapDataset( - # image_data=image_data, - # keypoints_data=keypoints_data.T, - # bbox=training_params['bbox'], - # ) - # # Create a dataloader object for training - # dataloader = torch.utils.data.DataLoader( - # dataset, batch_size=int(training_params['batch_size']), shuffle=True - # ) - # # Use preprocessed data to train the model - # train_model.net = model_training.train( - # dataloader, - # train_model.net, - # int(training_params['epochs']), - # int(training_params['weight_decay']), - # ) - - # pred_keypoints, keypoints = model_training.get_test_predictions(train_model.net, test_dataset) + dataset = datasets.FacemapDataset( + image_data=image_data, + keypoints_data=keypoints_data.T, + bbox=training_params['bbox'], + ) + # Create a dataloader object for training + dataloader = torch.utils.data.DataLoader( + dataset, batch_size=int(training_params['batch_size']), shuffle=True + ) + # Use preprocessed data to train the model + train_model.net = model_training.train( + dataloader, + train_model.net, + int(training_params['epochs']), + int(training_params['weight_decay']), + ) + + pred_keypoints, keypoints = model_training.get_test_predictions(train_model.net, dataset) # Save Refined Model model_output_path = output_dir / f'{refined_model_name}.pth' - torch.save(train_model.net.state_dict(), model_output_path) + train_model.save_model(model_output_path) model_id = (FacemapModelTrainingTask & key).fetch1('model_id') model_description = (FacemapModelTrainingTask & key).fetch1('model_description') From 
f44a3fdbaed0a2527ffd15f37e9c5986b4c8bd83 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Thu, 12 Oct 2023 13:23:06 -0500 Subject: [PATCH 107/182] revert cross val cahnges, switch to new branch --- element_facemap/train_facial_model.py | 44 ++++++++++++--------------- 1 file changed, 19 insertions(+), 25 deletions(-) diff --git a/element_facemap/train_facial_model.py b/element_facemap/train_facial_model.py index a9747ac..5482a16 100644 --- a/element_facemap/train_facial_model.py +++ b/element_facemap/train_facial_model.py @@ -222,7 +222,6 @@ class FacemapModelTrainingTask(dj.Manual): train_output_dir : varchar(255) # Trained model output directory refined_model_name='refined_model' : varchar(32) # Specify name of finetuned/trained model filepath model_id=null : smallint # Model index for insertion into FacemapModel table - retrain_model_id=null : smallint # Model index of model to be loaded for retraining selected_frame_ind=null : blob # Array of frames to run training on model_description=None : varchar(255) # Optional, model desc for insertion into FacemapModel @@ -374,14 +373,9 @@ def make(self, key): float(training_params['learning_rate']), int(training_params['weight_decay']), bbox=training_params['bbox']) - testing_video_id = - if testing_video_id is not None: - train_model.predict_landmarks(testing_video_id, frame_ind=selected_frame_ind) - - # Model Training with Cross Validation - from facemap.pose import model_training, datasets + # from facemap.pose import model_training, datasets # Split dataset into train and test splits @@ -403,24 +397,24 @@ def make(self, key): # # Splitting frames image data - dataset = datasets.FacemapDataset( - image_data=image_data, - keypoints_data=keypoints_data.T, - bbox=training_params['bbox'], - ) - # Create a dataloader object for training - dataloader = torch.utils.data.DataLoader( - dataset, batch_size=int(training_params['batch_size']), shuffle=True - ) - # Use preprocessed data to train the model - train_model.net = model_training.train( - dataloader, - train_model.net, - int(training_params['epochs']), - int(training_params['weight_decay']), - ) - - pred_keypoints, keypoints = model_training.get_test_predictions(train_model.net, dataset) + # dataset = datasets.FacemapDataset( + # image_data=image_data, + # keypoints_data=keypoints_data.T, + # bbox=training_params['bbox'], + # ) + # # Create a dataloader object for training + # dataloader = torch.utils.data.DataLoader( + # dataset, batch_size=int(training_params['batch_size']), shuffle=True + # ) + # # Use preprocessed data to train the model + # train_model.net = model_training.train( + # dataloader, + # train_model.net, + # int(training_params['epochs']), + # int(training_params['weight_decay']), + # ) + + # pred_keypoints, keypoints = model_training.get_test_predictions(train_model.net, dataset) # Save Refined Model From 06ff66e60dbcd6fe8da92be2a6907db686411af5 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Mon, 23 Oct 2023 10:54:11 -0500 Subject: [PATCH 108/182] update body part contents: --- element_facemap/facial_pose_model.py | 30 ++++++++++++++-------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/element_facemap/facial_pose_model.py b/element_facemap/facial_pose_model.py index c6566c8..3566998 100644 --- a/element_facemap/facial_pose_model.py +++ b/element_facemap/facial_pose_model.py @@ -103,21 +103,21 @@ class BodyPart(dj.Lookup): # Facemap Default BodyPart list contents = [ - "eye(back)", - "eye(bottom)", - "eye(front)", - "eye(top)", - 
"lowerlip", - "mouth", - "nose(bottom)", - "nose(r)", - "nose(tip)", - "nose(top)", - "nosebridge", - "paw", - "whisker(I)", - "whisker(III)", - "whisker(II)", + ("eye(back)", ''), + ("eye(bottom)", ''), + ("eye(front)", ''), + ("eye(top)", ''), + ("lowerlip", ''), + ("mouth", ''), + ("nose(bottom)", ''), + ("nose(r)", ''), + ("nose(tip)", ''), + ("nose(top)", ''), + ("nosebridge", ''), + ("paw", ''), + ("whisker(I)", ''), + ("whisker(III)", ''), + ("whisker(II)", ''), ] @schema From 3fc3e2b97623a326b15376eb8ca8cbdaa4bae87b Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Mon, 23 Oct 2023 14:33:51 -0500 Subject: [PATCH 109/182] fetch fileset paths using key --- element_facemap/train_facial_model.py | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/element_facemap/train_facial_model.py b/element_facemap/train_facial_model.py index 5482a16..99659c8 100644 --- a/element_facemap/train_facial_model.py +++ b/element_facemap/train_facial_model.py @@ -163,7 +163,7 @@ def insert_new_params( cls, paramset_desc: str, params: dict, paramset_idx: int = None ): """ - Insert a new set of training parameters into dlc.TrainingParamSet. + Insert a new set of training parameters into FacemapTrainParamSet. Args: paramset_desc (str): Description of parameter set to be inserted @@ -292,13 +292,11 @@ def make(self, key): output_dir = find_full_path(fbe.get_facemap_root_data_dir(), train_output_dir) video_files = [find_full_path(fbe.get_facemap_root_data_dir(), fp).as_posix() - for fp in (FacemapTrainFileSet.VideoFile & - {'file_set_id': key['file_set_id']}).fetch("video_file_path")] + for fp in (FacemapTrainFileSet.VideoFile & key).fetch("video_file_path")] # manually specified .h5 keypoints file keypoints_fileset = [find_full_path(fbe.get_facemap_root_data_dir(), fp).as_posix() - for fp in (FacemapTrainFileSet.KeypointsFile & - {'file_set_id': key['file_set_id']}).fetch("file_path")] + for fp in (FacemapTrainFileSet.KeypointsFile & key).fetch("file_path")] keypoints_file_name = (FacemapModelTrainingTask & key).fetch1("keypoints_filename") From a1f8d9cfbdddf0e65c2606eabe42afe2e1b8bcfd Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Mon, 23 Oct 2023 14:34:51 -0500 Subject: [PATCH 110/182] remove alternate cv model training --- element_facemap/train_facial_model.py | 45 +-------------------------- 1 file changed, 1 insertion(+), 44 deletions(-) diff --git a/element_facemap/train_facial_model.py b/element_facemap/train_facial_model.py index 99659c8..b97f414 100644 --- a/element_facemap/train_facial_model.py +++ b/element_facemap/train_facial_model.py @@ -371,50 +371,7 @@ def make(self, key): float(training_params['learning_rate']), int(training_params['weight_decay']), bbox=training_params['bbox']) - - # Model Training with Cross Validation - # from facemap.pose import model_training, datasets - - - # Split dataset into train and test splits - - # Splitting keypoints data - # dsplits = utils.split_data(X,Y,tcam,tneural) - # ( - # X_train, - # X_test, - # Y_train, - # Y_test, - # itrain_sample_b, - # itest_sample_b, - # itrain_sample, - # itest_sample, - # itrain, - # itest, - # ) = dsplits - - # # Splitting frames image data - - # dataset = datasets.FacemapDataset( - # image_data=image_data, - # keypoints_data=keypoints_data.T, - # bbox=training_params['bbox'], - # ) - # # Create a dataloader object for training - # dataloader = torch.utils.data.DataLoader( - # dataset, batch_size=int(training_params['batch_size']), shuffle=True - # ) - # # Use preprocessed data to train the 
model - # train_model.net = model_training.train( - # dataloader, - # train_model.net, - # int(training_params['epochs']), - # int(training_params['weight_decay']), - # ) - - # pred_keypoints, keypoints = model_training.get_test_predictions(train_model.net, dataset) - - + # Save Refined Model model_output_path = output_dir / f'{refined_model_name}.pth' train_model.save_model(model_output_path) From 617a6e970ed4516de5d077ac81585dd3d24a44b0 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Mon, 23 Oct 2023 14:53:22 -0500 Subject: [PATCH 111/182] update comment --- element_facemap/train_facial_model.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/element_facemap/train_facial_model.py b/element_facemap/train_facial_model.py index b97f414..f15c31c 100644 --- a/element_facemap/train_facial_model.py +++ b/element_facemap/train_facial_model.py @@ -273,8 +273,9 @@ class FacemapModelTraining(dj.Computed): Attributes: FacemapModelTrainingTask (foreign key): FacemapModelTrainingTask key. train_model_time (datetime): Time of creation of newly trained model - latest_snapshot (int unsigned): Latest exact snapshot index (i.e., never -1). - config_template (longblob): Stored full config file.""" + facemap_model_reference (smallint): Reference to index of facemap_pose.FacemapModel + + """ definition = """ -> FacemapModelTrainingTask From 9080a2efa43f2f9af89dcd11de29125b69368186 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Mon, 23 Oct 2023 15:00:03 -0500 Subject: [PATCH 112/182] update keypoints loading bodyparts --- element_facemap/train_facial_model.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/element_facemap/train_facial_model.py b/element_facemap/train_facial_model.py index f15c31c..d129e2e 100644 --- a/element_facemap/train_facial_model.py +++ b/element_facemap/train_facial_model.py @@ -281,7 +281,7 @@ class FacemapModelTraining(dj.Computed): -> FacemapModelTrainingTask --- train_model_time : datetime # Time of creation of train model file - facemap_model_reference : smallint # Reference to index facemap_pose.FacemapModel table + facemap_model_reference : smallint # Reference to index FacemapModel table """ def make(self, key): @@ -358,7 +358,7 @@ def make(self, key): # LY, LX, sy, sx = utils.video_placement(Ly, Lx) # reshaped_videos = utils.multivideo_reshape(image_data, LY, LX, Ly, Lx, sy, sx) - keypoints_data = utils.load_keypoints(facemap_pose.BodyPart.contents, keypoints_file) + keypoints_data = utils.load_keypoints(list(zip(*facemap_pose.BodyPart.contents))[0], keypoints_file) # Model Parameters (fetch from TrainingParamSet as dict) training_params = (FacemapTrainParamSet & f'paramset_idx={key["paramset_idx"]}').fetch1('params') From 3af4495e101ce64ff1d9e30f4948b87aa8ae860b Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Mon, 23 Oct 2023 15:02:45 -0500 Subject: [PATCH 113/182] update comments --- element_facemap/facial_pose_model.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/element_facemap/facial_pose_model.py b/element_facemap/facial_pose_model.py index 3566998..a5749dc 100644 --- a/element_facemap/facial_pose_model.py +++ b/element_facemap/facial_pose_model.py @@ -230,9 +230,9 @@ def generate(cls, key, model_id: int, relative_video_paths: list, task_mode: str Args: model_id (int): User Specified model identification number session_key (dict): - relative_video_paths (list): _description_ - task_mode (str, optional): _description_. Defaults to "trigger". 
- bbox (list, optional): _description_. Defaults to []. + relative_video_paths (list): list of relative videos in VideoRecording.File table + task_mode (str, optional): Load or Trigger. Defaults to "trigger". + bbox (list, optional): Bounding box for processing. Defaults to []. """ video_paths = [find_full_path(fbe.get_facemap_root_data_dir(), rpath) for rpath in relative_video_paths] for vid_path in video_paths: From 985fe5fead258d451b509ab7810ec2a7963f1ed7 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Mon, 23 Oct 2023 17:37:43 -0500 Subject: [PATCH 114/182] update model insertion --- element_facemap/facial_pose_model.py | 19 ++++++++----------- 1 file changed, 8 insertions(+), 11 deletions(-) diff --git a/element_facemap/facial_pose_model.py b/element_facemap/facial_pose_model.py index a5749dc..e206fcb 100644 --- a/element_facemap/facial_pose_model.py +++ b/element_facemap/facial_pose_model.py @@ -163,22 +163,19 @@ class File(dj.Part): """ @classmethod def insert_new_model(cls, model_id: int, model_name: str, model_description: str, full_model_path: str): - facemap_model_insert = dict( + facemap_model_entry = dict( model_id=model_id, model_name=model_name, model_description=model_description ) - FacemapModel.insert1(facemap_model_insert) + FacemapModel.insert1(facemap_model_entry) - body_part_insert = [] - for bp in BodyPart.contents: - body_part_insert.append(dict(model_id=model_id, body_part=bp)) + body_part_entry = [] + for bp in BodyPart.fetch('body_part'): + body_part_entry.append(dict(model_id=model_id, body_part=bp)) - # Insert into parent BodyPart table if no entries are present - if len(cls.BodyPart()) == 0: - cls.BodyPart.insert(body_part_insert) - file_insert = dict(model_id=model_id, model_file=full_model_path) + file_entry = dict(model_id=model_id, model_file=full_model_path) - cls.BodyPart.insert(body_part_insert) - cls.File.insert1(file_insert) + cls.BodyPart.insert(body_part_entry) + cls.File.insert1(file_entry) @schema class FacemapPoseEstimationTask(dj.Manual): From 2b5595190510c71f0d22b8b47e3bb02d5b89c501 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Mon, 23 Oct 2023 17:38:08 -0500 Subject: [PATCH 115/182] set bbox to null --- element_facemap/facial_pose_model.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/element_facemap/facial_pose_model.py b/element_facemap/facial_pose_model.py index e206fcb..aeb3d49 100644 --- a/element_facemap/facial_pose_model.py +++ b/element_facemap/facial_pose_model.py @@ -198,7 +198,7 @@ class FacemapPoseEstimationTask(dj.Manual): --- pose_estimation_output_dir='' : varchar(255) # output dir - stores results of Facemap Pose estimation analysis task_mode='trigger' : enum('load', 'trigger') - bbox=None : longblob # list containing bounding box for cropping the video [x1, x2, y1, y2] + bbox=null : longblob # list containing bounding box for cropping the video [x1, x2, y1, y2] task_description='' : varchar(128) """ From b9a69ab2b6341b277622cb938b1f7fc7762c2df5 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Mon, 23 Oct 2023 17:39:08 -0500 Subject: [PATCH 116/182] update load trigger commentA --- element_facemap/facial_pose_model.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/element_facemap/facial_pose_model.py b/element_facemap/facial_pose_model.py index aeb3d49..2152732 100644 --- a/element_facemap/facial_pose_model.py +++ b/element_facemap/facial_pose_model.py @@ -186,7 +186,7 @@ class FacemapPoseEstimationTask(dj.Manual): FacemapModel (foreign key) : Primary key for 
the facemap model table pose_estimation_output_dir ( varchar(255), optional) : output dir storing the results of pose estimation analysis. - task_mode (enum) : Default trigger. Load or trigger analysis. + task_mode (enum) : Default trigger. 'load' or 'trigger' analysis. bbox (longblob) : Bounding box for cropping the video [x1, x2, y1, y2]. If not set, entire frame is used. task_description ( varchar(128), optional) : Task description. """ @@ -228,7 +228,7 @@ def generate(cls, key, model_id: int, relative_video_paths: list, task_mode: str model_id (int): User Specified model identification number session_key (dict): relative_video_paths (list): list of relative videos in VideoRecording.File table - task_mode (str, optional): Load or Trigger. Defaults to "trigger". + task_mode (str, optional): 'load' or 'trigger. Defaults to 'trigger'. bbox (list, optional): Bounding box for processing. Defaults to []. """ video_paths = [find_full_path(fbe.get_facemap_root_data_dir(), rpath) for rpath in relative_video_paths] From b7c889a6d38fac2f3f573ccbfb78d67dfedf76d2 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Mon, 23 Oct 2023 17:50:40 -0500 Subject: [PATCH 117/182] update task generation to be based on vrec_key --- element_facemap/facial_pose_model.py | 36 +++++++++++++--------------- 1 file changed, 17 insertions(+), 19 deletions(-) diff --git a/element_facemap/facial_pose_model.py b/element_facemap/facial_pose_model.py index 2152732..14c9195 100644 --- a/element_facemap/facial_pose_model.py +++ b/element_facemap/facial_pose_model.py @@ -221,7 +221,7 @@ def infer_output_dir(cls, key, relative=True, mkdir=True): @classmethod - def generate(cls, key, model_id: int, relative_video_paths: list, task_mode: str = "trigger", bbox: list = []): + def generate(cls, key, model_id: int, task_mode: str = "trigger", bbox: list = []): """Generate a unique pose estimation task for each of the relative_video_paths Args: @@ -231,24 +231,22 @@ def generate(cls, key, model_id: int, relative_video_paths: list, task_mode: str task_mode (str, optional): 'load' or 'trigger. Defaults to 'trigger'. bbox (list, optional): Bounding box for processing. Defaults to []. 
""" - video_paths = [find_full_path(fbe.get_facemap_root_data_dir(), rpath) for rpath in relative_video_paths] - for vid_path in video_paths: - device_id = (fbe.VideoRecording & key).fetch('device_id') - vrec_key = (fbe.VideoRecording & key).fetch('key') - - model_key = (FacemapModel & f"model_id={model_id}").fetch1("KEY") - pose_estimation_output_dir = cls.infer_output_dir(vrec_key) - - facemap_pose_estimation_task_insert = { - **vrec_key, - **model_key, - "pose_estimation_output_dir": pose_estimation_output_dir, - "task_mode": task_mode, - "bbox": bbox, - } - cls.insert1( - facemap_pose_estimation_task_insert - ) + device_id = (fbe.VideoRecording & key).fetch('device_id') + vrec_key = (fbe.VideoRecording & key).fetch('key') + + model_key = (FacemapModel & f"model_id={model_id}").fetch1("KEY") + pose_estimation_output_dir = cls.infer_output_dir(vrec_key) + + facemap_pose_estimation_task_insert = { + **vrec_key, + **model_key, + "pose_estimation_output_dir": pose_estimation_output_dir, + "task_mode": task_mode, + "bbox": bbox, + } + cls.insert1( + facemap_pose_estimation_task_insert + ) insert_pose_estimation_task = generate @schema From 4da5995075e55e1e322e9bf9f28a9c8be1c27bae Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Mon, 23 Oct 2023 17:54:56 -0500 Subject: [PATCH 118/182] update load, remove check that files exist, now FnF error will be thrown --- element_facemap/facial_pose_model.py | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/element_facemap/facial_pose_model.py b/element_facemap/facial_pose_model.py index 14c9195..d10fe41 100644 --- a/element_facemap/facial_pose_model.py +++ b/element_facemap/facial_pose_model.py @@ -380,15 +380,13 @@ def make(self, key): ) = _load_facemap_results(key, facemap_result_path, full_metadata_path) elif task_mode == "load": - if ( - facemap_result_path.exists() & full_metadata_path.exists() - ): # Load preprocessed inference results - ( - body_part_position_entry, - inference_duration, - total_frame_count, - creation_time, - ) = _load_facemap_results(key, facemap_result_path, full_metadata_path) + # Load preprocessed inference results + ( + body_part_position_entry, + inference_duration, + total_frame_count, + creation_time, + ) = _load_facemap_results(key, facemap_result_path, full_metadata_path) self.insert1( { From d85300fb958f6d895596920fb89ff9ccb47a2183 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Mon, 23 Oct 2023 18:04:32 -0500 Subject: [PATCH 119/182] rename files --- element_facemap/{facial_pose_model.py => facemap_inference.py} | 0 element_facemap/{train_facial_model.py => facemap_train.py} | 0 2 files changed, 0 insertions(+), 0 deletions(-) rename element_facemap/{facial_pose_model.py => facemap_inference.py} (100%) rename element_facemap/{train_facial_model.py => facemap_train.py} (100%) diff --git a/element_facemap/facial_pose_model.py b/element_facemap/facemap_inference.py similarity index 100% rename from element_facemap/facial_pose_model.py rename to element_facemap/facemap_inference.py diff --git a/element_facemap/train_facial_model.py b/element_facemap/facemap_train.py similarity index 100% rename from element_facemap/train_facial_model.py rename to element_facemap/facemap_train.py From 15e3b7851ba48b2cd5e61b5f00c743b6793314d8 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Mon, 23 Oct 2023 18:06:52 -0500 Subject: [PATCH 120/182] change facemap_pose to facemap_inference --- element_facemap/facemap_train.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 
deletions(-) diff --git a/element_facemap/facemap_train.py b/element_facemap/facemap_train.py index d129e2e..c812ab7 100644 --- a/element_facemap/facemap_train.py +++ b/element_facemap/facemap_train.py @@ -8,7 +8,7 @@ from element_interface.utils import find_full_path, dict_to_uuid, find_root_directory from . import facial_behavior_estimation as fbe -from . import facial_pose_model as facemap_pose +from . import facemap_inference schema = dj.schema() @@ -31,7 +31,7 @@ def activate( the `facemap_train` schema of element-facemap fbe_schema_name (str): Schema name on the database server to activate the 'facial_behavioral_estimation facemap_model_schema_name (str): Schema name on the database server to activate the - `facemap_pose_model` schema of element-facemap + `facemap_inference` schema of element-facemap create_schema (bool): when True (default), create schema in the database if it does not yet exist. create_tables (bool): when True (default), create schema tables in the database @@ -76,7 +76,7 @@ def activate( ) # activate facial pose model schema - facemap_pose.activate( + facemap_inference.activate( facemap_model_schema_name, create_schema=create_schema, create_tables=create_tables, @@ -273,7 +273,7 @@ class FacemapModelTraining(dj.Computed): Attributes: FacemapModelTrainingTask (foreign key): FacemapModelTrainingTask key. train_model_time (datetime): Time of creation of newly trained model - facemap_model_reference (smallint): Reference to index of facemap_pose.FacemapModel + facemap_model_reference (smallint): Reference to index of facemap_inference.FacemapModel """ @@ -310,10 +310,10 @@ def make(self, key): train_model.pose_prediction_setup() # Sets default facemap model as train_model.net, handles empty bbox retrain_model_id = (FacemapModelTrainingTask & key).fetch1('retrain_model_id') - if retrain_model_id is not None: # Retrain an existing model from the facemap_pose.FacemapModel table + if retrain_model_id is not None: # Retrain an existing model from the facemap_inference.FacemapModel table # Fetch model file attachment so that model_file (.pth) is availible in Path.cwd() - model_file = (facemap_pose.FacemapModel.File & {'model_id': retrain_model_id}).fetch1("model_file") + model_file = (facemap_inference.FacemapModel.File & {'model_id': retrain_model_id}).fetch1("model_file") # Set train_model object to load preexisting model train_model.model_name = model_file @@ -358,7 +358,7 @@ def make(self, key): # LY, LX, sy, sx = utils.video_placement(Ly, Lx) # reshaped_videos = utils.multivideo_reshape(image_data, LY, LX, Ly, Lx, sy, sx) - keypoints_data = utils.load_keypoints(list(zip(*facemap_pose.BodyPart.contents))[0], keypoints_file) + keypoints_data = utils.load_keypoints(list(zip(*facemap_inference.BodyPart.contents))[0], keypoints_file) # Model Parameters (fetch from TrainingParamSet as dict) training_params = (FacemapTrainParamSet & f'paramset_idx={key["paramset_idx"]}').fetch1('params') @@ -382,13 +382,13 @@ def make(self, key): # Insert newly trained model results into FacemapModel table try: - model_ids = facemap_pose.FacemapModel.fetch("model_id") + model_ids = facemap_inference.FacemapModel.fetch("model_id") if model_id is None or model_id in model_ids: model_id = max(model_ids) + 1 except ValueError: # case that nothing has been inserted model_id = 0 - facemap_pose.FacemapModel().insert_new_model(model_id, + facemap_inference.FacemapModel().insert_new_model(model_id, refined_model_name, model_description, model_output_path) From 8dce7358953209d87e304e41bc68467c489756b1 
Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Mon, 23 Oct 2023 18:08:18 -0500 Subject: [PATCH 121/182] mkae keypointsFile part table a 1 to 1 mapping --- element_facemap/facemap_train.py | 1 - 1 file changed, 1 deletion(-) diff --git a/element_facemap/facemap_train.py b/element_facemap/facemap_train.py index c812ab7..4dcf08f 100644 --- a/element_facemap/facemap_train.py +++ b/element_facemap/facemap_train.py @@ -131,7 +131,6 @@ class KeypointsFile(dj.Part): definition = """ # Paths of training files (e.g.: .h5 keypoints data file) -> master - file_id: int --- file_path: varchar(255) """ From 20be2cd1007ccb7d0b09167390893fc972f020bc Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Mon, 23 Oct 2023 18:08:58 -0500 Subject: [PATCH 122/182] update comment --- element_facemap/facemap_train.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/element_facemap/facemap_train.py b/element_facemap/facemap_train.py index 4dcf08f..eb0833c 100644 --- a/element_facemap/facemap_train.py +++ b/element_facemap/facemap_train.py @@ -138,7 +138,7 @@ class KeypointsFile(dj.Part): @schema class FacemapTrainParamSet(dj.Lookup): - """Parameters used to train a model, initial ROIs from (_proc.npy) + """Parameters used to train a model Attributes: paramset_idx (smallint): Index uniqely identifying each paramset. From 1025a8b3dba9a868f7fd90fca7fa350c3d428343 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Mon, 23 Oct 2023 18:17:00 -0500 Subject: [PATCH 123/182] refactor pose estimation task definition, remove unneeded attributes --- element_facemap/facemap_train.py | 23 ++++------------------- 1 file changed, 4 insertions(+), 19 deletions(-) diff --git a/element_facemap/facemap_train.py b/element_facemap/facemap_train.py index eb0833c..0e9be1d 100644 --- a/element_facemap/facemap_train.py +++ b/element_facemap/facemap_train.py @@ -205,11 +205,8 @@ class FacemapModelTrainingTask(dj.Manual): training_task_id (int): Unique ID for training task. train_output_dir( varchar(255) ): Relative output directory for trained model refined_model_name ( varchar(32) ): Name for retrained model - model_id (smallint): Unique Model index to be inserted into FacemapModel table - retrain_model_id (smallint): Model index to query FacemapModel table to link model.net + retrain_model_id (smallint): Model index, of FacemapModel table, to be used for retraining model_description ( varchar(255) ): Optional. Model Description for insertion into FacemapModel - selected_frame_ind (blob) : Array of frames to run training on, if not specified all frames used. - keypoints_filename ( varchar(64) ): Optional. 
Name of specific keypoints file if multiple """ @@ -220,11 +217,8 @@ class FacemapModelTrainingTask(dj.Manual): --- train_output_dir : varchar(255) # Trained model output directory refined_model_name='refined_model' : varchar(32) # Specify name of finetuned/trained model filepath - model_id=null : smallint # Model index for insertion into FacemapModel table - retrain_model_id=null : smallint # Model index of model to be loaded for retraining - selected_frame_ind=null : blob # Array of frames to run training on + -> facemap_inference.FacemapModel.proj(retrain_model_id='model_id') model_description=None : varchar(255) # Optional, model desc for insertion into FacemapModel - keypoints_filename=None : varchar(64) # Specify keypoints filename if multiple keypoints files are stored """ def infer_output_dir(self, key, relative=True, mkdir=True): video_file = (FacemapTrainFileSet.VideoFile & key).fetch("video_file_path", limit=1)[0] @@ -248,9 +242,6 @@ def insert_facemap_training_task(cls, paramset_idx, refined_model_name='refined_model', model_description=None, - selected_frame_ind=[], - keypoints_filename="", - model_id=None, retrain_model_id=None): key = {"file_set_id": file_set_id, "paramset_idx": paramset_idx} inferred_output_dir = cls().infer_output_dir(key, relative=True, mkdir=True) @@ -258,10 +249,7 @@ def insert_facemap_training_task(cls, training_task_id=training_task_id, train_output_dir=inferred_output_dir.as_posix(), refined_model_name=refined_model_name, - selected_frame_ind=selected_frame_ind, model_description=model_description, - keypoints_filename=keypoints_filename, - model_id=model_id, retrain_model_id=retrain_model_id) cls.insert1(facemap_training_task_insert) @@ -295,17 +283,14 @@ def make(self, key): for fp in (FacemapTrainFileSet.VideoFile & key).fetch("video_file_path")] # manually specified .h5 keypoints file - keypoints_fileset = [find_full_path(fbe.get_facemap_root_data_dir(), fp).as_posix() + keypoints_file = [find_full_path(fbe.get_facemap_root_data_dir(), fp).as_posix() for fp in (FacemapTrainFileSet.KeypointsFile & key).fetch("file_path")] - keypoints_file_name = (FacemapModelTrainingTask & key).fetch1("keypoints_filename") - - keypoints_file = [f for f in keypoints_fileset if keypoints_file_name in f] if len(keypoints_file) > 0: keypoints_file = keypoints_file[0] # if multiple keypoints files are specified, select first file # Create a pose model object, specifying the video files - train_model = pose.Pose(filenames=[video_files]) + train_model = pose.Pose(filenames=[video_files]) # facemap expects list of list! train_model.pose_prediction_setup() # Sets default facemap model as train_model.net, handles empty bbox retrain_model_id = (FacemapModelTrainingTask & key).fetch1('retrain_model_id') From 7461543f683fe3b27530055eb65fd621b480a215 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Fri, 27 Oct 2023 17:01:17 -0500 Subject: [PATCH 124/182] Update element_facemap/facemap_inference.py Co-authored-by: Kushal Bakshi <52367253+kushalbakshi@users.noreply.github.com> --- element_facemap/facemap_inference.py | 1 + 1 file changed, 1 insertion(+) diff --git a/element_facemap/facemap_inference.py b/element_facemap/facemap_inference.py index d10fe41..c7c8edd 100644 --- a/element_facemap/facemap_inference.py +++ b/element_facemap/facemap_inference.py @@ -13,6 +13,7 @@ import pickle import shutil from . 
import facial_behavior_estimation as fbe +from .facial_behavior_estimation import get_facemap_root_data_dir, get_facemap_processed_data_dir schema = dj.schema() From a55784fdf92d4ed7f935d026091e5dd707213ee1 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Fri, 27 Oct 2023 17:01:31 -0500 Subject: [PATCH 125/182] Update element_facemap/facemap_inference.py Co-authored-by: Kushal Bakshi <52367253+kushalbakshi@users.noreply.github.com> --- element_facemap/facemap_inference.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/element_facemap/facemap_inference.py b/element_facemap/facemap_inference.py index c7c8edd..63bea27 100644 --- a/element_facemap/facemap_inference.py +++ b/element_facemap/facemap_inference.py @@ -164,10 +164,13 @@ class File(dj.Part): """ @classmethod def insert_new_model(cls, model_id: int, model_name: str, model_description: str, full_model_path: str): - facemap_model_entry = dict( - model_id=model_id, model_name=model_name, model_description=model_description + self.insert1( + dict( + model_id=model_id, + model_name=model_name, + model_description=model_description, + ) ) - FacemapModel.insert1(facemap_model_entry) body_part_entry = [] for bp in BodyPart.fetch('body_part'): From 00370dd4d163c10a9f270fb3f488aa97e708246c Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Fri, 27 Oct 2023 17:01:40 -0500 Subject: [PATCH 126/182] Update element_facemap/facemap_inference.py Co-authored-by: Kushal Bakshi <52367253+kushalbakshi@users.noreply.github.com> --- element_facemap/facemap_inference.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/element_facemap/facemap_inference.py b/element_facemap/facemap_inference.py index 63bea27..8b44acc 100644 --- a/element_facemap/facemap_inference.py +++ b/element_facemap/facemap_inference.py @@ -172,9 +172,14 @@ def insert_new_model(cls, model_id: int, model_name: str, model_description: str ) ) - body_part_entry = [] - for bp in BodyPart.fetch('body_part'): - body_part_entry.append(dict(model_id=model_id, body_part=bp)) + cls.BodyPart.insert( + [ + dict( + model_id=model_id, + body_part=part, + ) for part in BodyPart.fetch("body_part") + ] + ) file_entry = dict(model_id=model_id, model_file=full_model_path) From 1f4377249dbbb73d4d2c666bcab80d9ccd64582c Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Fri, 27 Oct 2023 17:01:53 -0500 Subject: [PATCH 127/182] Update element_facemap/facemap_inference.py Co-authored-by: Kushal Bakshi <52367253+kushalbakshi@users.noreply.github.com> --- element_facemap/facemap_inference.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/element_facemap/facemap_inference.py b/element_facemap/facemap_inference.py index 8b44acc..173d02f 100644 --- a/element_facemap/facemap_inference.py +++ b/element_facemap/facemap_inference.py @@ -181,7 +181,12 @@ def insert_new_model(cls, model_id: int, model_name: str, model_description: str ] ) - file_entry = dict(model_id=model_id, model_file=full_model_path) + cls.File.insert1( + dict( + model_id=model_id, + model_file=full_model_path, + ), + ) cls.BodyPart.insert(body_part_entry) cls.File.insert1(file_entry) From e1c79fa426a6da88107b527c987375b2b63bc360 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Fri, 27 Oct 2023 17:02:19 -0500 Subject: [PATCH 128/182] Update element_facemap/facemap_inference.py Co-authored-by: Kushal Bakshi <52367253+kushalbakshi@users.noreply.github.com> --- element_facemap/facemap_inference.py | 2 -- 1 file changed, 2 deletions(-) diff --git 
a/element_facemap/facemap_inference.py b/element_facemap/facemap_inference.py index 173d02f..fcfeff2 100644 --- a/element_facemap/facemap_inference.py +++ b/element_facemap/facemap_inference.py @@ -188,8 +188,6 @@ def insert_new_model(cls, model_id: int, model_name: str, model_description: str ), ) - cls.BodyPart.insert(body_part_entry) - cls.File.insert1(file_entry) @schema class FacemapPoseEstimationTask(dj.Manual): From 6fe0c8e580fe7b953a16d915c2f553fe1360edb4 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Fri, 27 Oct 2023 17:02:32 -0500 Subject: [PATCH 129/182] Update element_facemap/facemap_inference.py Co-authored-by: Kushal Bakshi <52367253+kushalbakshi@users.noreply.github.com> --- element_facemap/facemap_inference.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/element_facemap/facemap_inference.py b/element_facemap/facemap_inference.py index fcfeff2..3434f8a 100644 --- a/element_facemap/facemap_inference.py +++ b/element_facemap/facemap_inference.py @@ -208,7 +208,7 @@ class FacemapPoseEstimationTask(dj.Manual): -> fbe.VideoRecording -> FacemapModel --- - pose_estimation_output_dir='' : varchar(255) # output dir - stores results of Facemap Pose estimation analysis + pose_estimation_output_dir : varchar(255) # output dir - stores results of Facemap Pose estimation analysis task_mode='trigger' : enum('load', 'trigger') bbox=null : longblob # list containing bounding box for cropping the video [x1, x2, y1, y2] task_description='' : varchar(128) From 0b7f339095a818f1bc73c1c6bbc82380d5a6756e Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Fri, 27 Oct 2023 17:23:54 -0500 Subject: [PATCH 130/182] Update element_facemap/facemap_inference.py Co-authored-by: Kushal Bakshi <52367253+kushalbakshi@users.noreply.github.com> --- element_facemap/facemap_inference.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/element_facemap/facemap_inference.py b/element_facemap/facemap_inference.py index 3434f8a..59c79d4 100644 --- a/element_facemap/facemap_inference.py +++ b/element_facemap/facemap_inference.py @@ -217,8 +217,8 @@ class FacemapPoseEstimationTask(dj.Manual): @classmethod def infer_output_dir(cls, key, relative=True, mkdir=True): video_file = (fbe.VideoRecording.File & key).fetch("file_path", limit=1)[0] - video_dir = find_full_path(fbe.get_facemap_root_data_dir(), video_file).parent - root_dir = find_root_directory(fbe.get_facemap_root_data_dir(), video_dir) + video_dir = find_full_path(get_facemap_root_data_dir(), video_file).parent + root_dir = find_root_directory(get_facemap_root_data_dir(), video_dir) model_id = (FacemapPoseEstimationTask & key).fetch1("model_id") processed_dir = Path(fbe.get_facemap_processed_data_dir()) From 8846b3d6cff050ee50c2da9bfe2b59dfabc031b5 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Fri, 27 Oct 2023 17:24:05 -0500 Subject: [PATCH 131/182] Update element_facemap/facemap_inference.py Co-authored-by: Kushal Bakshi <52367253+kushalbakshi@users.noreply.github.com> --- element_facemap/facemap_inference.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/element_facemap/facemap_inference.py b/element_facemap/facemap_inference.py index 59c79d4..5a5a5ed 100644 --- a/element_facemap/facemap_inference.py +++ b/element_facemap/facemap_inference.py @@ -221,7 +221,7 @@ def infer_output_dir(cls, key, relative=True, mkdir=True): root_dir = find_root_directory(get_facemap_root_data_dir(), video_dir) model_id = (FacemapPoseEstimationTask & key).fetch1("model_id") - processed_dir = 
Path(fbe.get_facemap_processed_data_dir()) + processed_dir = Path(get_facemap_processed_data_dir()) output_dir = ( processed_dir / video_dir.relative_to(root_dir) / f"facemap_{model_id}" ) From 48034d7b5937ab24bcbb70a62d2e45d861e125ab Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Fri, 27 Oct 2023 17:35:11 -0500 Subject: [PATCH 132/182] clean u[ load trigger logic to reduce _load_facemap_result function calls --- element_facemap/facemap_inference.py | 144 ++++++++++++--------------- 1 file changed, 65 insertions(+), 79 deletions(-) diff --git a/element_facemap/facemap_inference.py b/element_facemap/facemap_inference.py index d10fe41..32d1e84 100644 --- a/element_facemap/facemap_inference.py +++ b/element_facemap/facemap_inference.py @@ -100,32 +100,33 @@ class BodyPart(dj.Lookup): --- body_part_description='' : varchar(1000) """ - + # Facemap Default BodyPart list - contents = [ - ("eye(back)", ''), - ("eye(bottom)", ''), - ("eye(front)", ''), - ("eye(top)", ''), - ("lowerlip", ''), - ("mouth", ''), - ("nose(bottom)", ''), - ("nose(r)", ''), - ("nose(tip)", ''), - ("nose(top)", ''), - ("nosebridge", ''), - ("paw", ''), - ("whisker(I)", ''), - ("whisker(III)", ''), - ("whisker(II)", ''), + contents = [ + ("eye(back)", ""), + ("eye(bottom)", ""), + ("eye(front)", ""), + ("eye(top)", ""), + ("lowerlip", ""), + ("mouth", ""), + ("nose(bottom)", ""), + ("nose(r)", ""), + ("nose(tip)", ""), + ("nose(top)", ""), + ("nosebridge", ""), + ("paw", ""), + ("whisker(I)", ""), + ("whisker(III)", ""), + ("whisker(II)", ""), ] - + + @schema class FacemapModel(dj.Manual): """Trained Models stored for facial pose inference Attributes: - model_id(int) : User specified ID associated with a unique model + model_id(int) : User specified ID associated with a unique model model_name( varchar(64) ): Name of model, filepath.stem """ @@ -161,22 +162,32 @@ class File(dj.Part): --- model_file: attach # model file attachment """ + @classmethod - def insert_new_model(cls, model_id: int, model_name: str, model_description: str, full_model_path: str): + def insert_new_model( + cls, + model_id: int, + model_name: str, + model_description: str, + full_model_path: str, + ): facemap_model_entry = dict( - model_id=model_id, model_name=model_name, model_description=model_description + model_id=model_id, + model_name=model_name, + model_description=model_description, ) FacemapModel.insert1(facemap_model_entry) body_part_entry = [] - for bp in BodyPart.fetch('body_part'): + for bp in BodyPart.fetch("body_part"): body_part_entry.append(dict(model_id=model_id, body_part=bp)) - + file_entry = dict(model_id=model_id, model_file=full_model_path) cls.BodyPart.insert(body_part_entry) cls.File.insert1(file_entry) + @schema class FacemapPoseEstimationTask(dj.Manual): """Staging table for pairing of video recordings and Facemap parameters before processing. @@ -219,20 +230,19 @@ def infer_output_dir(cls, key, relative=True, mkdir=True): return output_dir.relative_to(processed_dir) if relative else output_dir - @classmethod def generate(cls, key, model_id: int, task_mode: str = "trigger", bbox: list = []): """Generate a unique pose estimation task for each of the relative_video_paths Args: - model_id (int): User Specified model identification number - session_key (dict): + model_id (int): User Specified model identification number + session_key (dict): relative_video_paths (list): list of relative videos in VideoRecording.File table task_mode (str, optional): 'load' or 'trigger. Defaults to 'trigger'. 
bbox (list, optional): Bounding box for processing. Defaults to []. """ - device_id = (fbe.VideoRecording & key).fetch('device_id') - vrec_key = (fbe.VideoRecording & key).fetch('key') + device_id = (fbe.VideoRecording & key).fetch("device_id") + vrec_key = (fbe.VideoRecording & key).fetch("key") model_key = (FacemapModel & f"model_id={model_id}").fetch1("KEY") pose_estimation_output_dir = cls.infer_output_dir(vrec_key) @@ -244,11 +254,11 @@ def generate(cls, key, model_id: int, task_mode: str = "trigger", bbox: list = [ "task_mode": task_mode, "bbox": bbox, } - cls.insert1( - facemap_pose_estimation_task_insert - ) + cls.insert1(facemap_pose_estimation_task_insert) + insert_pose_estimation_task = generate - + + @schema class FacemapPoseEstimation(dj.Computed): """Results of facemap pose estimation @@ -317,37 +327,31 @@ def make(self, key): video_symlinks.append(video_symlink.as_posix()) # Trigger Facemap Pose Estimation Inference - if task_mode == "trigger": - # Triggering facemap for pose estimation requires: - # - model_path: full path to the directory containing the trained model - # - video_filepaths: full paths to the video files for inference - # - analyze_video_params: optional parameters to analyze video (uses facemap default params) + if ( + facemap_result_path.exists() & full_metadata_path.exists() + ) or task_mode == "load": # Load results and do not rerun processing + ( + body_part_position_entry, + inference_duration, + total_frame_count, + creation_time, + ) = _load_facemap_results(key, facemap_result_path, full_metadata_path) + self.insert1( + { + **key, + "pose_estimation_time": creation_time, + "pose_estimation_duration": inference_duration, + "total_frame_count": total_frame_count, + } + ) + self.BodyPartPosition.insert(body_part_position_entry) + return + elif task_mode == "trigger": from facemap.pose import pose as facemap_pose, model_loader - # If output files have been created, load the output - if ( - facemap_result_path.exists() & full_metadata_path.exists() - ): # Load results and do not rerun processing - ( - body_part_position_entry, - inference_duration, - total_frame_count, - creation_time, - ) = _load_facemap_results(key, facemap_result_path, full_metadata_path) - self.insert1( - { - **key, - "pose_estimation_time": creation_time, - "pose_estimation_duration": inference_duration, - "total_frame_count": total_frame_count, - } - ) - self.BodyPartPosition.insert(body_part_position_entry) - return - bbox = (FacemapPoseEstimationTask & key).fetch1("bbox") or [] - + # Model Name of interest should be specified by user during facemap task params manual update model_id = (FacemapPoseEstimationTask & key).fetch("model_id") @@ -378,25 +382,7 @@ def make(self, key): total_frame_count, creation_time, ) = _load_facemap_results(key, facemap_result_path, full_metadata_path) - - elif task_mode == "load": - # Load preprocessed inference results - ( - body_part_position_entry, - inference_duration, - total_frame_count, - creation_time, - ) = _load_facemap_results(key, facemap_result_path, full_metadata_path) - - self.insert1( - { - **key, - "pose_estimation_time": creation_time, - "pose_estimation_duration": inference_duration, - "total_frame_count": total_frame_count, - } - ) - self.BodyPartPosition.insert(body_part_position_entry) + self.BodyPartPosition.insert(body_part_position_entry) @classmethod def get_trajectory(cls, key: dict, body_parts: list = "all") -> pd.DataFrame: @@ -446,7 +432,7 @@ def _load_facemap_results(key, facemap_result_path, full_metadata_path): with 
open(full_metadata_path, "rb") as f: metadata = pickle.load(f) keypoints_data = utils.load_keypoints(metadata["bodyparts"], facemap_result_path) - + # Facemap inference result is a 3D nested array with D1 - (x,y likelihood), D2 - bodyparts, D3 - frame count # body parts are ordered the same way as stored pose_x_coord = keypoints_data[0, :, :] # (bodyparts, frames) From a4c7d583290064554d63a18198627a903faed755 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Fri, 27 Oct 2023 17:41:06 -0500 Subject: [PATCH 133/182] change self to cls --- element_facemap/facemap_inference.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/element_facemap/facemap_inference.py b/element_facemap/facemap_inference.py index 5353221..cbeb535 100644 --- a/element_facemap/facemap_inference.py +++ b/element_facemap/facemap_inference.py @@ -175,7 +175,7 @@ def insert_new_model( model_description: str, full_model_path: str, ): - self.insert1( + cls.insert1( dict( model_id=model_id, model_name=model_name, From 5badd682483dad82636c85edfbffa1fa57643807 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Fri, 27 Oct 2023 17:49:10 -0500 Subject: [PATCH 134/182] modify comments --- element_facemap/facemap_train.py | 212 ++++++++++++++++++------------- 1 file changed, 124 insertions(+), 88 deletions(-) diff --git a/element_facemap/facemap_train.py b/element_facemap/facemap_train.py index 0e9be1d..5d73bf3 100644 --- a/element_facemap/facemap_train.py +++ b/element_facemap/facemap_train.py @@ -18,7 +18,7 @@ def activate( facemap_train_schema_name: str, fbe_schema_name: str = None, - facemap_model_schema_name: str = None, + facemap_model_schema_name: str = None, *, create_schema: bool = True, create_tables: bool = True, @@ -112,7 +112,8 @@ class VideoFile(dj.Part): Attributes: FacemapTrainFileSet (foreign key) : FacemapTrainFileSet key. video_file_id (int) : Video File index - video_file_path ( varchar(255) ) : Path to file on disk relative to root.""" + video_file_path ( varchar(255) ) : Path to file on disk relative to root. + """ definition = """ # Paths of training files (e.g., .avi, .mp4 video files) -> master @@ -120,14 +121,14 @@ class VideoFile(dj.Part): --- video_file_path: varchar(255) """ - + class KeypointsFile(dj.Part): - """Keypoints File containing labels and paths in a given FacemapTrainFileSet + """Keypoints File and paths in a given FacemapTrainFileSet Attributes: FacemapTrainFileSet (foreign key) : FacemapTrainFileSet key. - file_id : Keypoint File index. - file_path ( varchar(255) ) : Path to file on disk relative to root.""" + file_path ( varchar(255) ) : Path to file on disk relative to root. + """ definition = """ # Paths of training files (e.g.: .h5 keypoints data file) -> master @@ -165,7 +166,7 @@ def insert_new_params( Insert a new set of training parameters into FacemapTrainParamSet. Args: - paramset_desc (str): Description of parameter set to be inserted + paramset_desc (str): Description of parameter set to be inserted params (dict): Dictionary including all settings to specify model training. Must include shuffle & trainingsetindex b/c not in config.yaml. project_path and video_sets will be overwritten by config.yaml. @@ -194,7 +195,6 @@ def insert_new_params( cls.insert1(param_dict) # if duplicate, will raise duplicate error - @schema class FacemapModelTrainingTask(dj.Manual): """Staging table for pairing videosets and training parameter sets @@ -203,10 +203,10 @@ class FacemapModelTrainingTask(dj.Manual): FacemapTrainFileSet (foreign key): FacemapTrainFileSet Key. 
FacemapTrainParamSet (foreign key): TrainingParamSet key. training_task_id (int): Unique ID for training task. - train_output_dir( varchar(255) ): Relative output directory for trained model + train_output_dir( varchar(255) ): Relative output directory for trained model refined_model_name ( varchar(32) ): Name for retrained model retrain_model_id (smallint): Model index, of FacemapModel table, to be used for retraining - model_description ( varchar(255) ): Optional. Model Description for insertion into FacemapModel + model_description ( varchar(255) ): Optional. Model Description for insertion into FacemapModel """ @@ -220,14 +220,19 @@ class FacemapModelTrainingTask(dj.Manual): -> facemap_inference.FacemapModel.proj(retrain_model_id='model_id') model_description=None : varchar(255) # Optional, model desc for insertion into FacemapModel """ + def infer_output_dir(self, key, relative=True, mkdir=True): - video_file = (FacemapTrainFileSet.VideoFile & key).fetch("video_file_path", limit=1)[0] + video_file = (FacemapTrainFileSet.VideoFile & key).fetch( + "video_file_path", limit=1 + )[0] video_dir = find_full_path(fbe.get_facemap_root_data_dir(), video_file).parent root_dir = find_root_directory(fbe.get_facemap_root_data_dir(), video_dir) processed_dir = Path(fbe.get_facemap_processed_data_dir()) output_dir = ( - processed_dir / video_dir.relative_to(root_dir) / f"facemap_train_{key['paramset_idx']}" + processed_dir + / video_dir.relative_to(root_dir) + / f"facemap_train_{key['paramset_idx']}" ) if mkdir: @@ -236,32 +241,37 @@ def infer_output_dir(self, key, relative=True, mkdir=True): return output_dir.relative_to(processed_dir) if relative else output_dir @classmethod - def insert_facemap_training_task(cls, - file_set_id, - training_task_id, - paramset_idx, - refined_model_name='refined_model', - model_description=None, - retrain_model_id=None): + def insert_facemap_training_task( + cls, + file_set_id, + training_task_id, + paramset_idx, + refined_model_name="refined_model", + model_description=None, + retrain_model_id=None, + ): key = {"file_set_id": file_set_id, "paramset_idx": paramset_idx} inferred_output_dir = cls().infer_output_dir(key, relative=True, mkdir=True) - facemap_training_task_insert = dict(**key, - training_task_id=training_task_id, - train_output_dir=inferred_output_dir.as_posix(), - refined_model_name=refined_model_name, - model_description=model_description, - retrain_model_id=retrain_model_id) + facemap_training_task_insert = dict( + **key, + training_task_id=training_task_id, + train_output_dir=inferred_output_dir.as_posix(), + refined_model_name=refined_model_name, + model_description=model_description, + retrain_model_id=retrain_model_id, + ) cls.insert1(facemap_training_task_insert) - + + @schema class FacemapModelTraining(dj.Computed): - """Automated Model training + """Automated Model training Attributes: FacemapModelTrainingTask (foreign key): FacemapModelTrainingTask key. 
train_model_time (datetime): Time of creation of newly trained model facemap_model_reference (smallint): Reference to index of facemap_inference.FacemapModel - + """ definition = """ @@ -276,93 +286,117 @@ def make(self, key): from facemap import utils import torch - train_output_dir = (FacemapModelTrainingTask & key).fetch1('train_output_dir') + train_output_dir = (FacemapModelTrainingTask & key).fetch1("train_output_dir") output_dir = find_full_path(fbe.get_facemap_root_data_dir(), train_output_dir) - video_files = [find_full_path(fbe.get_facemap_root_data_dir(), fp).as_posix() - for fp in (FacemapTrainFileSet.VideoFile & key).fetch("video_file_path")] - - # manually specified .h5 keypoints file - keypoints_file = [find_full_path(fbe.get_facemap_root_data_dir(), fp).as_posix() - for fp in (FacemapTrainFileSet.KeypointsFile & key).fetch("file_path")] - + video_files = [ + find_full_path(fbe.get_facemap_root_data_dir(), fp).as_posix() + for fp in (FacemapTrainFileSet.VideoFile & key).fetch("video_file_path") + ] + + # manually specified .h5 keypoints file + keypoints_file = [ + find_full_path(fbe.get_facemap_root_data_dir(), fp).as_posix() + for fp in (FacemapTrainFileSet.KeypointsFile & key).fetch("file_path") + ] + if len(keypoints_file) > 0: - keypoints_file = keypoints_file[0] # if multiple keypoints files are specified, select first file + keypoints_file = keypoints_file[ + 0 + ] # if multiple keypoints files are specified, select first file # Create a pose model object, specifying the video files - train_model = pose.Pose(filenames=[video_files]) # facemap expects list of list! - train_model.pose_prediction_setup() # Sets default facemap model as train_model.net, handles empty bbox - retrain_model_id = (FacemapModelTrainingTask & key).fetch1('retrain_model_id') + train_model = pose.Pose(filenames=[video_files]) # facemap expects list of list + train_model.pose_prediction_setup() # Sets default facemap model as train_model.net, handles empty bbox + retrain_model_id = (FacemapModelTrainingTask & key).fetch1("retrain_model_id") - if retrain_model_id is not None: # Retrain an existing model from the facemap_inference.FacemapModel table - + if ( + retrain_model_id is not None + ): # Retrain an existing model from the facemap_inference.FacemapModel table # Fetch model file attachment so that model_file (.pth) is availible in Path.cwd() - model_file = (facemap_inference.FacemapModel.File & {'model_id': retrain_model_id}).fetch1("model_file") + model_file = ( + facemap_inference.FacemapModel.File & {"model_id": retrain_model_id} + ).fetch1("model_file") # Set train_model object to load preexisting model train_model.model_name = model_file - + # Overwrite default train_model.net - train_model.net.load_state_dict(torch.load(model_file, map_location=train_model.device)) + train_model.net.load_state_dict( + torch.load(model_file, map_location=train_model.device) + ) # link model to torch device train_model.net.to(train_model.device) # Convert videos to images for train input - pre_selected_frame_ind = (FacemapModelTrainingTask & key).fetch1('selected_frame_ind') - - # Currently, only support single video training + pre_selected_frame_ind = (FacemapModelTrainingTask & key).fetch1( + "selected_frame_ind" + ) + + # Currently, only support single video training assert len(video_files) == 1 video_file = video_files[0] - if len(pre_selected_frame_ind) == 0: # set selected frames to all frames + if len(pre_selected_frame_ind) == 0: # set selected frames to all frames import cv2 + cap = 
cv2.VideoCapture(video_file) - selected_frame_ind = np.arange(int(cap.get(cv2.CAP_PROP_FRAME_COUNT))) + selected_frame_ind = np.arange(int(cap.get(cv2.CAP_PROP_FRAME_COUNT))) else: selected_frame_ind = pre_selected_frame_ind - # Load image frames from video + # Load image frames from video image_data = utils.load_images_from_video(video_file, selected_frame_ind) - # MULTIVIDEO TODO - # image_data = [] - # for video_file in video_files: - # if len(pre_selected_frame_ind) == 0: # set selected frames to all frames + # MULTIVIDEO TODO + # image_data = [] + # for video_file in video_files: + # if len(pre_selected_frame_ind) == 0: # set selected frames to all frames - # cap = cv2.VideoCapture(video_file) - # selected_frame_ind = np.arange(int(cap.get(cv2.CAP_PROP_FRAME_COUNT))) - # else: - # selected_frame_ind = pre_selected_frame_ind + # cap = cv2.VideoCapture(video_file) + # selected_frame_ind = np.arange(int(cap.get(cv2.CAP_PROP_FRAME_COUNT))) + # else: + # selected_frame_ind = pre_selected_frame_ind - # image_data.append(utils.load_images_from_video(video_file, selected_frame_ind)) + # image_data.append(utils.load_images_from_video(video_file, selected_frame_ind)) - # -- For multivideo image data reshaping - # cumframes, Ly, Lx, containers = utils.get_frame_details(video_files) - # LY, LX, sy, sx = utils.video_placement(Ly, Lx) - # reshaped_videos = utils.multivideo_reshape(image_data, LY, LX, Ly, Lx, sy, sx) + # -- For multivideo image data reshaping + # cumframes, Ly, Lx, containers = utils.get_frame_details(video_files) + # LY, LX, sy, sx = utils.video_placement(Ly, Lx) + # reshaped_videos = utils.multivideo_reshape(image_data, LY, LX, Ly, Lx, sy, sx) - keypoints_data = utils.load_keypoints(list(zip(*facemap_inference.BodyPart.contents))[0], keypoints_file) + keypoints_data = utils.load_keypoints( + list(zip(*facemap_inference.BodyPart.contents))[0], keypoints_file + ) # Model Parameters (fetch from TrainingParamSet as dict) - training_params = (FacemapTrainParamSet & f'paramset_idx={key["paramset_idx"]}').fetch1('params') - refined_model_name = (FacemapModelTrainingTask & key).fetch1('refined_model_name') # default = "refined_model" + training_params = ( + FacemapTrainParamSet & f'paramset_idx={key["paramset_idx"]}' + ).fetch1("params") + refined_model_name = (FacemapModelTrainingTask & key).fetch1( + "refined_model_name" + ) # default = "refined_model" # Train model using train function defined in Pose class - train_model.net = train_model.train(image_data[:,:,:,0], # note: using 0 index for now (could average across this dimension) - keypoints_data.T, # needs to be transposed - int(training_params['epochs']), - int(training_params['batch_size']), - float(training_params['learning_rate']), - int(training_params['weight_decay']), - bbox=training_params['bbox']) - + train_model.net = train_model.train( + image_data[ + :, :, :, 0 + ], # note: using 0 index for now (could average across this dimension) + keypoints_data.T, # needs to be transposed + int(training_params["epochs"]), + int(training_params["batch_size"]), + float(training_params["learning_rate"]), + int(training_params["weight_decay"]), + bbox=training_params["bbox"], + ) + # Save Refined Model - model_output_path = output_dir / f'{refined_model_name}.pth' + model_output_path = output_dir / f"{refined_model_name}.pth" train_model.save_model(model_output_path) - model_id = (FacemapModelTrainingTask & key).fetch1('model_id') - model_description = (FacemapModelTrainingTask & key).fetch1('model_description') + model_id = 
(FacemapModelTrainingTask & key).fetch1("model_id") + model_description = (FacemapModelTrainingTask & key).fetch1("model_description") # Insert newly trained model results into FacemapModel table try: @@ -372,16 +406,18 @@ def make(self, key): except ValueError: # case that nothing has been inserted model_id = 0 - facemap_inference.FacemapModel().insert_new_model(model_id, - refined_model_name, - model_description, - model_output_path) - - train_model_time = datetime.fromtimestamp(model_output_path.stat().st_mtime).strftime( - "%Y-%m-%d %H:%M:%S" + facemap_inference.FacemapModel().insert_new_model( + model_id, refined_model_name, model_description, model_output_path ) - self.insert1( - {**key, 'train_model_time': train_model_time, 'facemap_model_reference': model_id} + train_model_time = datetime.fromtimestamp( + model_output_path.stat().st_mtime + ).strftime("%Y-%m-%d %H:%M:%S") - ) \ No newline at end of file + self.insert1( + { + **key, + "train_model_time": train_model_time, + "facemap_model_reference": model_id, + } + ) From 7b9ee254a751b0fe526de8d2d60ed3fadfa8fecb Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Fri, 27 Oct 2023 17:50:50 -0500 Subject: [PATCH 135/182] change root dit imports --- element_facemap/facemap_train.py | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/element_facemap/facemap_train.py b/element_facemap/facemap_train.py index 5d73bf3..c42ebcb 100644 --- a/element_facemap/facemap_train.py +++ b/element_facemap/facemap_train.py @@ -9,7 +9,10 @@ from . import facial_behavior_estimation as fbe from . import facemap_inference - +from .facial_behavior_estimation import ( + get_facemap_root_data_dir, + get_facemap_processed_data_dir, +) schema = dj.schema() _linking_module = None @@ -225,10 +228,10 @@ def infer_output_dir(self, key, relative=True, mkdir=True): video_file = (FacemapTrainFileSet.VideoFile & key).fetch( "video_file_path", limit=1 )[0] - video_dir = find_full_path(fbe.get_facemap_root_data_dir(), video_file).parent - root_dir = find_root_directory(fbe.get_facemap_root_data_dir(), video_dir) + video_dir = find_full_path(get_facemap_root_data_dir(), video_file).parent + root_dir = find_root_directory(get_facemap_root_data_dir(), video_dir) - processed_dir = Path(fbe.get_facemap_processed_data_dir()) + processed_dir = Path(get_facemap_processed_data_dir()) output_dir = ( processed_dir / video_dir.relative_to(root_dir) @@ -287,16 +290,16 @@ def make(self, key): import torch train_output_dir = (FacemapModelTrainingTask & key).fetch1("train_output_dir") - output_dir = find_full_path(fbe.get_facemap_root_data_dir(), train_output_dir) + output_dir = find_full_path(get_facemap_root_data_dir(), train_output_dir) video_files = [ - find_full_path(fbe.get_facemap_root_data_dir(), fp).as_posix() + find_full_path(get_facemap_root_data_dir(), fp).as_posix() for fp in (FacemapTrainFileSet.VideoFile & key).fetch("video_file_path") ] # manually specified .h5 keypoints file keypoints_file = [ - find_full_path(fbe.get_facemap_root_data_dir(), fp).as_posix() + find_full_path(get_facemap_root_data_dir(), fp).as_posix() for fp in (FacemapTrainFileSet.KeypointsFile & key).fetch("file_path") ] From 846434699e1852a3c6fbba62b93c3a7892cc6b5c Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Fri, 27 Oct 2023 18:30:34 -0500 Subject: [PATCH 136/182] update infer_output_dir, inserts --- element_facemap/facemap_train.py | 58 ++++++++++++++++++++------------ 1 file changed, 36 insertions(+), 22 deletions(-) diff --git 
a/element_facemap/facemap_train.py b/element_facemap/facemap_train.py index c42ebcb..f2aaf33 100644 --- a/element_facemap/facemap_train.py +++ b/element_facemap/facemap_train.py @@ -155,12 +155,20 @@ class FacemapTrainParamSet(dj.Lookup): # Parameters to specify a facemap model training instance paramset_idx : smallint --- - paramset_desc : varchar(128) # Description of parameterset used for - param_set_hash : uuid # hash identifying this paramset + paramset_desc : varchar(128) # Optional description of parameterset + param_set_hash : uuid # hash identifying this paramset unique index (param_set_hash) - params : longblob # numpy array of initially selected ROIs + params : longblob # required model training parameters """ + required_parameters = ( + "weight_decay", + "bbox", + "learning_rate", + "epochs", + "batch_size", + ) + @classmethod def insert_new_params( cls, paramset_desc: str, params: dict, paramset_idx: int = None @@ -171,31 +179,36 @@ def insert_new_params( Args: paramset_desc (str): Description of parameter set to be inserted params (dict): Dictionary including all settings to specify model training. - Must include shuffle & trainingsetindex b/c not in config.yaml. - project_path and video_sets will be overwritten by config.yaml. - Note that trainingsetindex is 0-indexed paramset_idx (int): optional, integer to represent parameters. """ + for required_param in cls.required_parameters: + assert required_param in params, ( + "Missing required parameter: " + required_param + ) + if paramset_idx is None: paramset_idx = ( dj.U().aggr(cls, n="max(paramset_idx)").fetch1("n") or 0 ) + 1 - param_dict = { - "paramset_idx": paramset_idx, - "paramset_desc": paramset_desc, - "param_set_hash": dict_to_uuid(params), - "params": params, - } - param_query = cls & {"param_set_hash": param_dict["param_set_hash"]} + paramset_hash = dict_to_uuid(params) # store to avoid recompute + param_query = cls & {"param_set_hash": paramset_hash} + # If the specified param-set already exists if param_query: existing_paramset_idx = param_query.fetch1("paramset_idx") if existing_paramset_idx == int(paramset_idx): # If existing_idx same: return # job done else: - cls.insert1(param_dict) # if duplicate, will raise duplicate error + cls.insert1( + dict( + paramset_idx=paramset_idx, + paramset_desc=paramset_desc, + param_set_hash=paramset_hash, + params=params, + ), + ) # if duplicate, will raise duplicate error @schema @@ -255,15 +268,16 @@ def insert_facemap_training_task( ): key = {"file_set_id": file_set_id, "paramset_idx": paramset_idx} inferred_output_dir = cls().infer_output_dir(key, relative=True, mkdir=True) - facemap_training_task_insert = dict( - **key, - training_task_id=training_task_id, - train_output_dir=inferred_output_dir.as_posix(), - refined_model_name=refined_model_name, - model_description=model_description, - retrain_model_id=retrain_model_id, + cls.insert1( + dict( + **key, + training_task_id=training_task_id, + train_output_dir=inferred_output_dir.as_posix(), + refined_model_name=refined_model_name, + model_description=model_description, + retrain_model_id=retrain_model_id, + ), ) - cls.insert1(facemap_training_task_insert) @schema From e2701bb54e08f10cf1d17ea6ed39afb3b463ccdc Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Fri, 27 Oct 2023 18:44:15 -0500 Subject: [PATCH 137/182] remove multivideo comment --- element_facemap/facemap_train.py | 17 ----------------- 1 file changed, 17 deletions(-) diff --git a/element_facemap/facemap_train.py b/element_facemap/facemap_train.py index 
f2aaf33..05c0a1c 100644 --- a/element_facemap/facemap_train.py +++ b/element_facemap/facemap_train.py @@ -366,23 +366,6 @@ def make(self, key): # Load image frames from video image_data = utils.load_images_from_video(video_file, selected_frame_ind) - # MULTIVIDEO TODO - # image_data = [] - # for video_file in video_files: - # if len(pre_selected_frame_ind) == 0: # set selected frames to all frames - - # cap = cv2.VideoCapture(video_file) - # selected_frame_ind = np.arange(int(cap.get(cv2.CAP_PROP_FRAME_COUNT))) - # else: - # selected_frame_ind = pre_selected_frame_ind - - # image_data.append(utils.load_images_from_video(video_file, selected_frame_ind)) - - # -- For multivideo image data reshaping - # cumframes, Ly, Lx, containers = utils.get_frame_details(video_files) - # LY, LX, sy, sx = utils.video_placement(Ly, Lx) - # reshaped_videos = utils.multivideo_reshape(image_data, LY, LX, Ly, Lx, sy, sx) - keypoints_data = utils.load_keypoints( list(zip(*facemap_inference.BodyPart.contents))[0], keypoints_file ) From 35d4287da6b7683b87fc9765e6d219f13c8c1088 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Mon, 30 Oct 2023 18:51:27 -0500 Subject: [PATCH 138/182] update comment to remove Session and VideoRecording unneeded dependencies --- element_facemap/facemap_train.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/element_facemap/facemap_train.py b/element_facemap/facemap_train.py index 05c0a1c..04db595 100644 --- a/element_facemap/facemap_train.py +++ b/element_facemap/facemap_train.py @@ -42,10 +42,6 @@ def activate( linking_module (str): a module (or name) containing the required dependencies. Dependencies: - Upstream tables: - + Session: A parent table to VideoRecording, identifying a recording session - + Equipment: A parent table to VideoRecording, identifying video recording equipment - + VideoRecording: A parent table to FacemapInferenceTask, identifying videos to be used in inference Functions: + get_facemap_root_data_dir() -> list Retrieves the root data directory(s) with face recordings for all From b050b6cd83ad316dd3ea8b1bf2ee8ec16532b4db Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Mon, 30 Oct 2023 20:11:34 -0500 Subject: [PATCH 139/182] modify image_data loading to convert images to grayscale before running through model training --- element_facemap/facemap_train.py | 33 ++++++++++++++++++++------------ 1 file changed, 21 insertions(+), 12 deletions(-) diff --git a/element_facemap/facemap_train.py b/element_facemap/facemap_train.py index 04db595..08e590f 100644 --- a/element_facemap/facemap_train.py +++ b/element_facemap/facemap_train.py @@ -38,7 +38,7 @@ def activate( create_schema (bool): when True (default), create schema in the database if it does not yet exist. create_tables (bool): when True (default), create schema tables in the database - if they do not yet exist. + if they do not yet exist.i linking_module (str): a module (or name) containing the required dependencies. 
Dependencies: @@ -298,6 +298,7 @@ def make(self, key): from facemap.pose import pose from facemap import utils import torch + import cv2 train_output_dir = (FacemapModelTrainingTask & key).fetch1("train_output_dir") output_dir = find_full_path(get_facemap_root_data_dir(), train_output_dir) @@ -349,18 +350,28 @@ def make(self, key): # Currently, only support single video training assert len(video_files) == 1 - video_file = video_files[0] - if len(pre_selected_frame_ind) == 0: # set selected frames to all frames - import cv2 - cap = cv2.VideoCapture(video_file) - selected_frame_ind = np.arange(int(cap.get(cv2.CAP_PROP_FRAME_COUNT))) + # Load video capture to iterate through frames and convert to grayscale + cap = cv2.VideoCapture(video_file) + if len(pre_selected_frame_ind) == 0: # set selected frames to all frames + selected_frame_indices = np.arange(int(cap.get(cv2.CAP_PROP_FRAME_COUNT))) else: - selected_frame_ind = pre_selected_frame_ind + selected_frame_indices = pre_selected_frame_ind + frames = [] + for frame_ind in selected_frame_indices: + if int(cap.get(cv2.CAP_PROP_POS_FRAMES)) != frame_ind: + cap.set(cv2.CAP_PROP_POS_FRAMES, frame_ind) + ret, frame = cap.read() + gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) + if ret: + frames.append(gray_frame) + else: + print("Error reading frame") + image_data = np.array(frames) # Load image frames from video - image_data = utils.load_images_from_video(video_file, selected_frame_ind) + # image_data = utils.load_images_from_video(video_file, selected_frame_ind) keypoints_data = utils.load_keypoints( list(zip(*facemap_inference.BodyPart.contents))[0], keypoints_file @@ -375,10 +386,8 @@ def make(self, key): ) # default = "refined_model" # Train model using train function defined in Pose class - train_model.net = train_model.train( - image_data[ - :, :, :, 0 - ], # note: using 0 index for now (could average across this dimension) + train_model.train( + image_data, keypoints_data.T, # needs to be transposed int(training_params["epochs"]), int(training_params["batch_size"]), From 7f65129632f8836f0a35ebd961c7662b25d893c6 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Mon, 30 Oct 2023 20:59:42 -0500 Subject: [PATCH 140/182] modify output_dir to be unique for each run --- element_facemap/facemap_train.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/element_facemap/facemap_train.py b/element_facemap/facemap_train.py index 08e590f..940bae7 100644 --- a/element_facemap/facemap_train.py +++ b/element_facemap/facemap_train.py @@ -244,7 +244,7 @@ def infer_output_dir(self, key, relative=True, mkdir=True): output_dir = ( processed_dir / video_dir.relative_to(root_dir) - / f"facemap_train_{key['paramset_idx']}" + / f"facemap_train_fileset{key['file_set_id']}_paramset{key['paramset_idx']}" ) if mkdir: From 28dc011a0611e67cbf26cd0efeb207e255203f41 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Mon, 30 Oct 2023 21:32:42 -0500 Subject: [PATCH 141/182] modify pose estimation task insertion --- element_facemap/facemap_inference.py | 26 ++++++++++++++++++++------ 1 file changed, 20 insertions(+), 6 deletions(-) diff --git a/element_facemap/facemap_inference.py b/element_facemap/facemap_inference.py index cbeb535..9698836 100644 --- a/element_facemap/facemap_inference.py +++ b/element_facemap/facemap_inference.py @@ -205,7 +205,7 @@ def insert_new_model( class FacemapPoseEstimationTask(dj.Manual): """Staging table for pairing of video recordings and Facemap parameters before processing. 
- Attributes: + Attributes: s fbe.VideoRecording (foreign key) : Primary key for VideoRecording table. FacemapModel (foreign key) : Primary key for the facemap model table pose_estimation_output_dir ( varchar(255), optional) : output dir storing the results @@ -228,6 +228,20 @@ class FacemapPoseEstimationTask(dj.Manual): @classmethod def infer_output_dir(cls, key, relative=True, mkdir=True): + """Infer an output directory for an entry in the FacemapPoseEstimationTask table. + + Args: + key (_type_): Primary key from the FacemapPoseEstimationTask table. + relative (bool, optional): If True, pose_estimation_output_dir is returned relative to + imaging_root_dir. Defaults to True. + mkdir (bool, optional): If True, create pose_estimation_output_dir. Defaults to True. + + Returns: + dir (str): A default output directory for inference results (pose_estimation_output_dir + in FacemapPoseEstimationTask) based on the following convention: + processed_dir / relative_video_dir / {facemap_recordingid}_{model_id} + e.g.: sub1/sess1/video_files/facemap_recording_id0_model0 + """ video_file = (fbe.VideoRecording.File & key).fetch("file_path", limit=1)[0] video_dir = find_full_path(get_facemap_root_data_dir(), video_file).parent root_dir = find_root_directory(get_facemap_root_data_dir(), video_dir) @@ -235,7 +249,9 @@ def infer_output_dir(cls, key, relative=True, mkdir=True): model_id = (FacemapPoseEstimationTask & key).fetch1("model_id") processed_dir = Path(get_facemap_processed_data_dir()) output_dir = ( - processed_dir / video_dir.relative_to(root_dir) / f"facemap_{model_id}" + processed_dir + / video_dir.relative_to(root_dir) + / f"facemap_recordingid{key['recording_id']}_model{model_id}" ) if mkdir: @@ -249,16 +265,14 @@ def generate(cls, key, model_id: int, task_mode: str = "trigger", bbox: list = [ Args: model_id (int): User Specified model identification number - session_key (dict): + key (dict): Primary key from FacemapPoseEstimationTask table relative_video_paths (list): list of relative videos in VideoRecording.File table task_mode (str, optional): 'load' or 'trigger. Defaults to 'trigger'. bbox (list, optional): Bounding box for processing. Defaults to []. """ - device_id = (fbe.VideoRecording & key).fetch("device_id") vrec_key = (fbe.VideoRecording & key).fetch("key") - model_key = (FacemapModel & f"model_id={model_id}").fetch1("KEY") - pose_estimation_output_dir = cls.infer_output_dir(vrec_key) + pose_estimation_output_dir = cls.infer_output_dir(key) facemap_pose_estimation_task_insert = { **vrec_key, From 6b6f55d1a015d795a9d699c01cf21988d9bf07cc Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Mon, 30 Oct 2023 22:01:51 -0500 Subject: [PATCH 142/182] add docstrings and modify PoseEstimationTask defintion --- element_facemap/facemap_inference.py | 28 +++++++++++++++++----------- 1 file changed, 17 insertions(+), 11 deletions(-) diff --git a/element_facemap/facemap_inference.py b/element_facemap/facemap_inference.py index 9698836..25566f3 100644 --- a/element_facemap/facemap_inference.py +++ b/element_facemap/facemap_inference.py @@ -205,7 +205,7 @@ def insert_new_model( class FacemapPoseEstimationTask(dj.Manual): """Staging table for pairing of video recordings and Facemap parameters before processing. - Attributes: s + Attributes: fbe.VideoRecording (foreign key) : Primary key for VideoRecording table. 
FacemapModel (foreign key) : Primary key for the facemap model table pose_estimation_output_dir ( varchar(255), optional) : output dir storing the results @@ -220,10 +220,10 @@ class FacemapPoseEstimationTask(dj.Manual): -> fbe.VideoRecording -> FacemapModel --- - pose_estimation_output_dir : varchar(255) # output dir - stores results of Facemap Pose estimation analysis - task_mode='trigger' : enum('load', 'trigger') - bbox=null : longblob # list containing bounding box for cropping the video [x1, x2, y1, y2] - task_description='' : varchar(128) + pose_estimation_output_dir : varchar(255) # output dir - stores results of Facemap Pose estimation analysis + task_description : varchar(128) # Optional. Addtional task description + task_mode='trigger' : enum('load', 'trigger') + bbox=null : longblob # list containing bounding box for cropping the video [x1, x2, y1, y2] """ @classmethod @@ -231,7 +231,7 @@ def infer_output_dir(cls, key, relative=True, mkdir=True): """Infer an output directory for an entry in the FacemapPoseEstimationTask table. Args: - key (_type_): Primary key from the FacemapPoseEstimationTask table. + key (dict): Primary key from the FacemapPoseEstimationTask table. relative (bool, optional): If True, pose_estimation_output_dir is returned relative to imaging_root_dir. Defaults to True. mkdir (bool, optional): If True, create pose_estimation_output_dir. Defaults to True. @@ -260,24 +260,30 @@ def infer_output_dir(cls, key, relative=True, mkdir=True): return output_dir.relative_to(processed_dir) if relative else output_dir @classmethod - def generate(cls, key, model_id: int, task_mode: str = "trigger", bbox: list = []): + def generate( + cls, + key, + model_id: int, + task_description: str = "", + task_mode: str = "trigger", + bbox: list = [], + ): """Generate a unique pose estimation task for each of the relative_video_paths Args: model_id (int): User Specified model identification number key (dict): Primary key from FacemapPoseEstimationTask table + e.g.: {subject="sub1",session_id=0,recording_id=0,model_id=0} relative_video_paths (list): list of relative videos in VideoRecording.File table task_mode (str, optional): 'load' or 'trigger. Defaults to 'trigger'. bbox (list, optional): Bounding box for processing. Defaults to []. 
""" - vrec_key = (fbe.VideoRecording & key).fetch("key") - model_key = (FacemapModel & f"model_id={model_id}").fetch1("KEY") pose_estimation_output_dir = cls.infer_output_dir(key) facemap_pose_estimation_task_insert = { - **vrec_key, - **model_key, + **key, "pose_estimation_output_dir": pose_estimation_output_dir, + "task_description": task_description, "task_mode": task_mode, "bbox": bbox, } From 20d44214518eca0e275287d02f6d7455c39a8e9e Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Mon, 30 Oct 2023 22:03:38 -0500 Subject: [PATCH 143/182] update formatting --- element_facemap/facemap_inference.py | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/element_facemap/facemap_inference.py b/element_facemap/facemap_inference.py index 25566f3..a3dd68e 100644 --- a/element_facemap/facemap_inference.py +++ b/element_facemap/facemap_inference.py @@ -280,14 +280,15 @@ def generate( """ pose_estimation_output_dir = cls.infer_output_dir(key) - facemap_pose_estimation_task_insert = { - **key, - "pose_estimation_output_dir": pose_estimation_output_dir, - "task_description": task_description, - "task_mode": task_mode, - "bbox": bbox, - } - cls.insert1(facemap_pose_estimation_task_insert) + cls.insert1( + dict( + **key, + pose_estimation_output_dir=pose_estimation_output_dir, + task_description=task_description, + task_mode=task_mode, + bbox=bbox, + ), + ) insert_pose_estimation_task = generate From 535f51b743877ae4f923ac6a6c73befa7c69277f Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Tue, 31 Oct 2023 13:11:00 -0500 Subject: [PATCH 144/182] add back in selected_frame_ind --- element_facemap/facemap_train.py | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/element_facemap/facemap_train.py b/element_facemap/facemap_train.py index 940bae7..e50312f 100644 --- a/element_facemap/facemap_train.py +++ b/element_facemap/facemap_train.py @@ -214,8 +214,8 @@ class FacemapModelTrainingTask(dj.Manual): Attributes: FacemapTrainFileSet (foreign key): FacemapTrainFileSet Key. FacemapTrainParamSet (foreign key): TrainingParamSet key. - training_task_id (int): Unique ID for training task. train_output_dir( varchar(255) ): Relative output directory for trained model + selected_frame_ind (blob) : Array of frames to run training on, if not specified all frames used. refined_model_name ( varchar(32) ): Name for retrained model retrain_model_id (smallint): Model index, of FacemapModel table, to be used for retraining model_description ( varchar(255) ): Optional. 
Model Description for insertion into FacemapModel @@ -225,10 +225,10 @@ class FacemapModelTrainingTask(dj.Manual): definition = """ # Specification for a facemap model training instance -> FacemapTrainFileSet # video(s) and files for training -> FacemapTrainParamSet # Initially specified ROIs - training_task_id : smallint --- train_output_dir : varchar(255) # Trained model output directory - refined_model_name='refined_model' : varchar(32) # Specify name of finetuned/trained model filepath + selected_frame_ind=null : blob # Optional, array of frames to run training on + refined_model_name='refined_model' : varchar(128) # Specify name of finetuned/trained model filepath -> facemap_inference.FacemapModel.proj(retrain_model_id='model_id') model_description=None : varchar(255) # Optional, model desc for insertion into FacemapModel """ @@ -256,7 +256,6 @@ def infer_output_dir(self, key, relative=True, mkdir=True): def insert_facemap_training_task( cls, file_set_id, - training_task_id, paramset_idx, refined_model_name="refined_model", model_description=None, @@ -267,7 +266,6 @@ def insert_facemap_training_task( cls.insert1( dict( **key, - training_task_id=training_task_id, train_output_dir=inferred_output_dir.as_posix(), refined_model_name=refined_model_name, model_description=model_description, From 1f07bbe2f712637e635d9c00cc06b81737bfb124 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Tue, 31 Oct 2023 13:29:49 -0500 Subject: [PATCH 145/182] modify retrained model id to be specified as varchar --- element_facemap/facemap_train.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/element_facemap/facemap_train.py b/element_facemap/facemap_train.py index e50312f..f5d16e0 100644 --- a/element_facemap/facemap_train.py +++ b/element_facemap/facemap_train.py @@ -226,11 +226,11 @@ class FacemapModelTrainingTask(dj.Manual): -> FacemapTrainFileSet # video(s) and files for training -> FacemapTrainParamSet # Initially specified ROIs --- - train_output_dir : varchar(255) # Trained model output directory - selected_frame_ind=null : blob # Optional, array of frames to run training on - refined_model_name='refined_model' : varchar(128) # Specify name of finetuned/trained model filepath - -> facemap_inference.FacemapModel.proj(retrain_model_id='model_id') - model_description=None : varchar(255) # Optional, model desc for insertion into FacemapModel + train_output_dir : varchar(255) # Trained model output directory + selected_frame_ind=null : blob # Optional, array of frames to run training on + refined_model_name='refined_model' : varchar(128) # Specify name of finetuned/trained model filepath + -> facemap_inference.FacemapModel.proj(retrain_model_id='model_id') : varchar(64) # Specify retrain_model_id + model_description=None : varchar(255) # Optional, model desc for insertion into FacemapModel """ def infer_output_dir(self, key, relative=True, mkdir=True): From 92523b4ddda46a0c4edadd170c7b2d897f083502 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Tue, 31 Oct 2023 17:39:27 -0500 Subject: [PATCH 146/182] add RetrainedModelFile table to store rel file path of model --- element_facemap/facemap_inference.py | 4 ++++ element_facemap/facemap_train.py | 22 +++++++++++++++++++++- 2 files changed, 25 insertions(+), 1 deletion(-) diff --git a/element_facemap/facemap_inference.py b/element_facemap/facemap_inference.py index a3dd68e..58691ed 100644 --- a/element_facemap/facemap_inference.py +++ b/element_facemap/facemap_inference.py @@ -165,6 +165,7 @@ class File(dj.Part): -> 
master --- model_file: attach # model file attachment + relative_file_path: varchar(256) # relative path of model_file """ @classmethod @@ -197,6 +198,9 @@ def insert_new_model( dict( model_id=model_id, model_file=full_model_path, + relative_file_path=full_model_path.relative_to( + fbe.get_facemap_root_data_dir() + ), ), ) diff --git a/element_facemap/facemap_train.py b/element_facemap/facemap_train.py index f5d16e0..84e99e9 100644 --- a/element_facemap/facemap_train.py +++ b/element_facemap/facemap_train.py @@ -289,9 +289,21 @@ class FacemapModelTraining(dj.Computed): -> FacemapModelTrainingTask --- train_model_time : datetime # Time of creation of train model file - facemap_model_reference : smallint # Reference to index FacemapModel table """ + class RetrainedModelFile(dj.Part): + """Stores newly trained models + + Args: + dj (_type_): _description_ + """ + + definition = """ + -> master + --- + -> facemap_inference.FacemapModel.File.proj(retrain_file='relative_file_path') : varchar(256) + """ + def make(self, key): from facemap.pose import pose from facemap import utils @@ -413,6 +425,14 @@ def make(self, key): model_id, refined_model_name, model_description, model_output_path ) + self.RetrainedModelFile.insert1( + dict( + retrain_file=( + facemap_inference.FacemapModel & f"model_id={model_id}" + ).fetch1("relative_file_path") + ), + ) + train_model_time = datetime.fromtimestamp( model_output_path.stat().st_mtime ).strftime("%Y-%m-%d %H:%M:%S") From ba8a0ce04f002fc1cbdda5eb4eed2222d8dfe171 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Tue, 31 Oct 2023 21:38:44 -0500 Subject: [PATCH 147/182] update train task insertionA --- element_facemap/facemap_train.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/element_facemap/facemap_train.py b/element_facemap/facemap_train.py index 84e99e9..2a25527 100644 --- a/element_facemap/facemap_train.py +++ b/element_facemap/facemap_train.py @@ -260,6 +260,7 @@ def insert_facemap_training_task( refined_model_name="refined_model", model_description=None, retrain_model_id=None, + selected_frame_ind=None, ): key = {"file_set_id": file_set_id, "paramset_idx": paramset_idx} inferred_output_dir = cls().infer_output_dir(key, relative=True, mkdir=True) @@ -269,6 +270,7 @@ def insert_facemap_training_task( train_output_dir=inferred_output_dir.as_posix(), refined_model_name=refined_model_name, model_description=model_description, + selected_frame_ind=selected_frame_ind, retrain_model_id=retrain_model_id, ), ) From 0dd303aa887e92fd10cba24e5e3287e8a70c2b8f Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Tue, 31 Oct 2023 21:51:21 -0500 Subject: [PATCH 148/182] add relative file path to FacemapModel table for RetrainModel part table reference --- element_facemap/facemap_inference.py | 1 + 1 file changed, 1 insertion(+) diff --git a/element_facemap/facemap_inference.py b/element_facemap/facemap_inference.py index 58691ed..2f709f6 100644 --- a/element_facemap/facemap_inference.py +++ b/element_facemap/facemap_inference.py @@ -159,6 +159,7 @@ class File(dj.Part): Attributes: FacemapModel (foreign key): Facemap model primary key. 
model_file ( attach ): filepath of facemap model, relative to root data dir + """ definition = """ From c2496e82a262de354513fed96233f7973aa3f0f2 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Tue, 31 Oct 2023 21:57:07 -0500 Subject: [PATCH 149/182] remove varchar specification for .proj attributes --- element_facemap/facemap_train.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/element_facemap/facemap_train.py b/element_facemap/facemap_train.py index 2a25527..f125f5b 100644 --- a/element_facemap/facemap_train.py +++ b/element_facemap/facemap_train.py @@ -229,7 +229,7 @@ class FacemapModelTrainingTask(dj.Manual): train_output_dir : varchar(255) # Trained model output directory selected_frame_ind=null : blob # Optional, array of frames to run training on refined_model_name='refined_model' : varchar(128) # Specify name of finetuned/trained model filepath - -> facemap_inference.FacemapModel.proj(retrain_model_id='model_id') : varchar(64) # Specify retrain_model_id + -> facemap_inference.FacemapModel.proj(retrain_model_id='model_id') # Specify retrain_model_id model_description=None : varchar(255) # Optional, model desc for insertion into FacemapModel """ @@ -303,7 +303,7 @@ class RetrainedModelFile(dj.Part): definition = """ -> master --- - -> facemap_inference.FacemapModel.File.proj(retrain_file='relative_file_path') : varchar(256) + -> facemap_inference.FacemapModel.File.proj(retrain_file='relative_file_path') """ def make(self, key): From 45bfc0a07ebc7954ed2efdff6bf7a684f2e3cf8f Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Tue, 31 Oct 2023 22:08:53 -0500 Subject: [PATCH 150/182] set retrain model id to have a none default value --- element_facemap/facemap_train.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/element_facemap/facemap_train.py b/element_facemap/facemap_train.py index f125f5b..5c9fca2 100644 --- a/element_facemap/facemap_train.py +++ b/element_facemap/facemap_train.py @@ -229,7 +229,7 @@ class FacemapModelTrainingTask(dj.Manual): train_output_dir : varchar(255) # Trained model output directory selected_frame_ind=null : blob # Optional, array of frames to run training on refined_model_name='refined_model' : varchar(128) # Specify name of finetuned/trained model filepath - -> facemap_inference.FacemapModel.proj(retrain_model_id='model_id') # Specify retrain_model_id + -> facemap_inference.FacemapModel.proj(retrain_model_id='model_id')=None # Specify retrain_model_id model_description=None : varchar(255) # Optional, model desc for insertion into FacemapModel """ From c1bd4a31328a887481c79203cce7d8a298cd9725 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Tue, 31 Oct 2023 22:09:18 -0500 Subject: [PATCH 151/182] set retrain model id to have a null default value --- element_facemap/facemap_train.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/element_facemap/facemap_train.py b/element_facemap/facemap_train.py index 5c9fca2..491633a 100644 --- a/element_facemap/facemap_train.py +++ b/element_facemap/facemap_train.py @@ -229,7 +229,7 @@ class FacemapModelTrainingTask(dj.Manual): train_output_dir : varchar(255) # Trained model output directory selected_frame_ind=null : blob # Optional, array of frames to run training on refined_model_name='refined_model' : varchar(128) # Specify name of finetuned/trained model filepath - -> facemap_inference.FacemapModel.proj(retrain_model_id='model_id')=None # Specify retrain_model_id + -> facemap_inference.FacemapModel.proj(retrain_model_id='model_id')=null 
# Specify retrain_model_id model_description=None : varchar(255) # Optional, model desc for insertion into FacemapModel """ From c22b03a99cc6080601f2978a92f6b6063a654731 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Wed, 1 Nov 2023 12:14:50 -0500 Subject: [PATCH 152/182] specify retrain model id to be nullable --- element_facemap/facemap_train.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/element_facemap/facemap_train.py b/element_facemap/facemap_train.py index 491633a..44d85fe 100644 --- a/element_facemap/facemap_train.py +++ b/element_facemap/facemap_train.py @@ -229,7 +229,7 @@ class FacemapModelTrainingTask(dj.Manual): train_output_dir : varchar(255) # Trained model output directory selected_frame_ind=null : blob # Optional, array of frames to run training on refined_model_name='refined_model' : varchar(128) # Specify name of finetuned/trained model filepath - -> facemap_inference.FacemapModel.proj(retrain_model_id='model_id')=null # Specify retrain_model_id + -> [nullable]facemap_inference.FacemapModel.proj(retrain_model_id='model_id') # Specify retrain_model_id model_description=None : varchar(255) # Optional, model desc for insertion into FacemapModel """ From 6ad96a7cc358b1df2cb3cffd7c78627bce4f8d69 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Wed, 1 Nov 2023 13:22:33 -0500 Subject: [PATCH 153/182] remove model_id fetch --- element_facemap/facemap_train.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/element_facemap/facemap_train.py b/element_facemap/facemap_train.py index 44d85fe..1a2dd2c 100644 --- a/element_facemap/facemap_train.py +++ b/element_facemap/facemap_train.py @@ -296,8 +296,8 @@ class FacemapModelTraining(dj.Computed): class RetrainedModelFile(dj.Part): """Stores newly trained models - Args: - dj (_type_): _description_ + Attributes: + FacemapModelTraining (foreign key): """ definition = """ @@ -412,7 +412,6 @@ def make(self, key): model_output_path = output_dir / f"{refined_model_name}.pth" train_model.save_model(model_output_path) - model_id = (FacemapModelTrainingTask & key).fetch1("model_id") model_description = (FacemapModelTrainingTask & key).fetch1("model_description") # Insert newly trained model results into FacemapModel table From 75bef35565a4068828a4d47c76e01dd31b7120cd Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Wed, 1 Nov 2023 13:40:24 -0500 Subject: [PATCH 154/182] update to not allow user to specify model id --- element_facemap/facemap_train.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/element_facemap/facemap_train.py b/element_facemap/facemap_train.py index 1a2dd2c..fe112b6 100644 --- a/element_facemap/facemap_train.py +++ b/element_facemap/facemap_train.py @@ -417,8 +417,7 @@ def make(self, key): # Insert newly trained model results into FacemapModel table try: model_ids = facemap_inference.FacemapModel.fetch("model_id") - if model_id is None or model_id in model_ids: - model_id = max(model_ids) + 1 + model_id = max(model_ids) + 1 except ValueError: # case that nothing has been inserted model_id = 0 From 9c34a284930f449338a5c139e19aeb409a6053a5 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Wed, 1 Nov 2023 13:49:00 -0500 Subject: [PATCH 155/182] small bugfix --- element_facemap/facemap_inference.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/element_facemap/facemap_inference.py b/element_facemap/facemap_inference.py index 2f709f6..d9b0fe2 100644 --- a/element_facemap/facemap_inference.py +++ 
b/element_facemap/facemap_inference.py @@ -200,7 +200,7 @@ def insert_new_model( model_id=model_id, model_file=full_model_path, relative_file_path=full_model_path.relative_to( - fbe.get_facemap_root_data_dir() + fbe.get_facemap_root_data_dir()[0] ), ), ) From 51784fac613b27437d20afc3adee14519abea326 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Wed, 1 Nov 2023 14:02:48 -0500 Subject: [PATCH 156/182] remove relative_filepath option for model table --- element_facemap/facemap_inference.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/element_facemap/facemap_inference.py b/element_facemap/facemap_inference.py index d9b0fe2..a7aac0e 100644 --- a/element_facemap/facemap_inference.py +++ b/element_facemap/facemap_inference.py @@ -158,7 +158,7 @@ class File(dj.Part): Attributes: FacemapModel (foreign key): Facemap model primary key. - model_file ( attach ): filepath of facemap model, relative to root data dir + model_file ( attach ): file attachment of facemap model, stored as binary in db """ @@ -166,7 +166,6 @@ class File(dj.Part): -> master --- model_file: attach # model file attachment - relative_file_path: varchar(256) # relative path of model_file """ @classmethod @@ -199,8 +198,6 @@ def insert_new_model( dict( model_id=model_id, model_file=full_model_path, - relative_file_path=full_model_path.relative_to( - fbe.get_facemap_root_data_dir()[0] ), ), ) From 0d6a0c9c360261594c9600ffb6c8bacfefd2d35e Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Wed, 1 Nov 2023 14:04:18 -0500 Subject: [PATCH 157/182] switch to store file attachment in part table --- element_facemap/facemap_train.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/element_facemap/facemap_train.py b/element_facemap/facemap_train.py index fe112b6..2732ecf 100644 --- a/element_facemap/facemap_train.py +++ b/element_facemap/facemap_train.py @@ -303,7 +303,7 @@ class RetrainedModelFile(dj.Part): definition = """ -> master --- - -> facemap_inference.FacemapModel.File.proj(retrain_file='relative_file_path') + -> facemap_inference.FacemapModel.File.proj(retrain_file='model_file') """ def make(self, key): @@ -441,6 +441,5 @@ def make(self, key): { **key, "train_model_time": train_model_time, - "facemap_model_reference": model_id, } ) From 41b18386875f21a46d96dfea03792496adf8cb74 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Wed, 1 Nov 2023 14:06:53 -0500 Subject: [PATCH 158/182] fix typo --- element_facemap/facemap_inference.py | 1 - 1 file changed, 1 deletion(-) diff --git a/element_facemap/facemap_inference.py b/element_facemap/facemap_inference.py index a7aac0e..d3aa530 100644 --- a/element_facemap/facemap_inference.py +++ b/element_facemap/facemap_inference.py @@ -198,7 +198,6 @@ def insert_new_model( dict( model_id=model_id, model_file=full_model_path, - ), ), ) From ba498f1dc3024334347c00e2e48d7e4952c4aafa Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Wed, 1 Nov 2023 14:13:19 -0500 Subject: [PATCH 159/182] modify RetrainedModelFile insert --- element_facemap/facemap_train.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/element_facemap/facemap_train.py b/element_facemap/facemap_train.py index 2732ecf..5595773 100644 --- a/element_facemap/facemap_train.py +++ b/element_facemap/facemap_train.py @@ -429,7 +429,7 @@ def make(self, key): dict( retrain_file=( facemap_inference.FacemapModel & f"model_id={model_id}" - ).fetch1("relative_file_path") + ).fetch1("model_file") ), ) From 8281f6af3334b8f453f911bd079bea44398fa261 Mon Sep 17 
00:00:00 2001 From: Sidharth Hulyalkar Date: Wed, 1 Nov 2023 14:21:03 -0500 Subject: [PATCH 160/182] modify retrained file insert --- element_facemap/facemap_train.py | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/element_facemap/facemap_train.py b/element_facemap/facemap_train.py index 5595773..3a7e593 100644 --- a/element_facemap/facemap_train.py +++ b/element_facemap/facemap_train.py @@ -427,9 +427,8 @@ def make(self, key): self.RetrainedModelFile.insert1( dict( - retrain_file=( - facemap_inference.FacemapModel & f"model_id={model_id}" - ).fetch1("model_file") + train_model_time=train_model_time, + retrain_file=model_output_path, ), ) @@ -438,8 +437,8 @@ def make(self, key): ).strftime("%Y-%m-%d %H:%M:%S") self.insert1( - { + dict( **key, - "train_model_time": train_model_time, - } + train_model_time=train_model_time, + ) ) From bc4f6d60f9dd5a3c69f72c1cf53c08dec5ef4f65 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Wed, 1 Nov 2023 14:27:43 -0500 Subject: [PATCH 161/182] move part table insert after insert into facemapProcessing --- element_facemap/facemap_train.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/element_facemap/facemap_train.py b/element_facemap/facemap_train.py index 3a7e593..28eac8c 100644 --- a/element_facemap/facemap_train.py +++ b/element_facemap/facemap_train.py @@ -425,13 +425,6 @@ def make(self, key): model_id, refined_model_name, model_description, model_output_path ) - self.RetrainedModelFile.insert1( - dict( - train_model_time=train_model_time, - retrain_file=model_output_path, - ), - ) - train_model_time = datetime.fromtimestamp( model_output_path.stat().st_mtime ).strftime("%Y-%m-%d %H:%M:%S") @@ -442,3 +435,10 @@ def make(self, key): train_model_time=train_model_time, ) ) + + self.RetrainedModelFile.insert1( + dict( + train_model_time=train_model_time, + retrain_file=model_output_path, + ), + ) From b7d9f28e215466f548a020bce259fd986a25ea1e Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Wed, 1 Nov 2023 14:32:09 -0500 Subject: [PATCH 162/182] update RetrainedModelFile insert --- element_facemap/facemap_train.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/element_facemap/facemap_train.py b/element_facemap/facemap_train.py index 28eac8c..4555493 100644 --- a/element_facemap/facemap_train.py +++ b/element_facemap/facemap_train.py @@ -438,7 +438,7 @@ def make(self, key): self.RetrainedModelFile.insert1( dict( - train_model_time=train_model_time, + **key, retrain_file=model_output_path, ), ) From f83da5c6ba40bf25be60a86be9e36bd7334aabe9 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Wed, 1 Nov 2023 17:51:24 -0500 Subject: [PATCH 163/182] modify RetrainedModelFile part table to store model_id as link to model table --- element_facemap/facemap_train.py | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/element_facemap/facemap_train.py b/element_facemap/facemap_train.py index 4555493..13a9c71 100644 --- a/element_facemap/facemap_train.py +++ b/element_facemap/facemap_train.py @@ -226,11 +226,11 @@ class FacemapModelTrainingTask(dj.Manual): -> FacemapTrainFileSet # video(s) and files for training -> FacemapTrainParamSet # Initially specified ROIs --- - train_output_dir : varchar(255) # Trained model output directory - selected_frame_ind=null : blob # Optional, array of frames to run training on - refined_model_name='refined_model' : varchar(128) # Specify name of finetuned/trained model filepath - -> 
[nullable]facemap_inference.FacemapModel.proj(retrain_model_id='model_id') # Specify retrain_model_id - model_description=None : varchar(255) # Optional, model desc for insertion into FacemapModel + train_output_dir : varchar(255) # Trained model output directory + selected_frame_ind=null : blob # Optional, array of frames to run training on + refined_model_name='refined_model' : varchar(128) # Specify name of finetuned/trained model filepath + -> [nullable]facemap_inference.FacemapModel.proj(retrain_model_id='model_id') # Specify retrain_model_id + model_description=None : varchar(255) # Optional, model desc for insertion into FacemapModel """ def infer_output_dir(self, key, relative=True, mkdir=True): @@ -302,8 +302,9 @@ class RetrainedModelFile(dj.Part): definition = """ -> master + -> facemap_inference.FacemapModel.proj(retrain_model_id='model_id') # link to facemap model table --- - -> facemap_inference.FacemapModel.File.proj(retrain_file='model_file') + retrain_model_file: attach # retrained model file attachment """ def make(self, key): @@ -439,6 +440,7 @@ def make(self, key): self.RetrainedModelFile.insert1( dict( **key, + retrain_model_id=model_id, retrain_file=model_output_path, ), ) From d19fc01b5a76a201567130a96d62258f2d00909a Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Wed, 1 Nov 2023 18:06:14 -0500 Subject: [PATCH 164/182] modify refined_model_name -> refined_model_prefix --- element_facemap/facemap_train.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/element_facemap/facemap_train.py b/element_facemap/facemap_train.py index 13a9c71..a431680 100644 --- a/element_facemap/facemap_train.py +++ b/element_facemap/facemap_train.py @@ -228,7 +228,7 @@ class FacemapModelTrainingTask(dj.Manual): --- train_output_dir : varchar(255) # Trained model output directory selected_frame_ind=null : blob # Optional, array of frames to run training on - refined_model_name='refined_model' : varchar(128) # Specify name of finetuned/trained model filepath + refined_model_prefix='refined_model_' : varchar(128) # Specify prefix of finetuned/trained model filepath -> [nullable]facemap_inference.FacemapModel.proj(retrain_model_id='model_id') # Specify retrain_model_id model_description=None : varchar(255) # Optional, model desc for insertion into FacemapModel """ @@ -394,8 +394,8 @@ def make(self, key): training_params = ( FacemapTrainParamSet & f'paramset_idx={key["paramset_idx"]}' ).fetch1("params") - refined_model_name = (FacemapModelTrainingTask & key).fetch1( - "refined_model_name" + refined_model_prefix = (FacemapModelTrainingTask & key).fetch1( + "refined_model_prefix" ) # default = "refined_model" # Train model using train function defined in Pose class @@ -410,7 +410,7 @@ def make(self, key): ) # Save Refined Model - model_output_path = output_dir / f"{refined_model_name}.pth" + model_output_path = output_dir / f"{refined_model_prefix}.pth" train_model.save_model(model_output_path) model_description = (FacemapModelTrainingTask & key).fetch1("model_description") @@ -423,7 +423,7 @@ def make(self, key): model_id = 0 facemap_inference.FacemapModel().insert_new_model( - model_id, refined_model_name, model_description, model_output_path + model_id, refined_model_prefix, model_description, model_output_path ) train_model_time = datetime.fromtimestamp( From 49acca2c65e128b00e7a25ae0629204f30059e34 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Wed, 1 Nov 2023 18:23:27 -0500 Subject: [PATCH 165/182] rename var and update comment --- 
element_facemap/facemap_train.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/element_facemap/facemap_train.py b/element_facemap/facemap_train.py index a431680..43cf1a8 100644 --- a/element_facemap/facemap_train.py +++ b/element_facemap/facemap_train.py @@ -21,7 +21,7 @@ def activate( facemap_train_schema_name: str, fbe_schema_name: str = None, - facemap_model_schema_name: str = None, + facemap_inference_schema_name: str = None, *, create_schema: bool = True, create_tables: bool = True, @@ -33,7 +33,7 @@ def activate( facemap_train_schema_name (str): schema name on the database server to activate the `facemap_train` schema of element-facemap fbe_schema_name (str): Schema name on the database server to activate the 'facial_behavioral_estimation - facemap_model_schema_name (str): Schema name on the database server to activate the + facemap_inference_schema_name (str): Schema name on the database server to activate the `facemap_inference` schema of element-facemap create_schema (bool): when True (default), create schema in the database if it does not yet exist. @@ -74,9 +74,9 @@ def activate( linking_module=linking_module, ) - # activate facial pose model schema + # activate facemap inference schema facemap_inference.activate( - facemap_model_schema_name, + facemap_inference_schema_name, create_schema=create_schema, create_tables=create_tables, linking_module=linking_module, From 59a009fb29d707a04e6bf799862fc472d96627eb Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Wed, 1 Nov 2023 18:26:16 -0500 Subject: [PATCH 166/182] update comment --- element_facemap/facemap_inference.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/element_facemap/facemap_inference.py b/element_facemap/facemap_inference.py index d3aa530..d72b195 100644 --- a/element_facemap/facemap_inference.py +++ b/element_facemap/facemap_inference.py @@ -137,7 +137,7 @@ class FacemapModel(dj.Manual): definition = """ model_id : int # user assigned ID associated with a unique model --- - model_name : varchar(64) # name of model + model_name : varchar(64) # optional name/prefix of model model_description: varchar(1000) # optional model description """ From a4ec8a8516db044c1e79dbbd7b81862a6c282cfb Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Wed, 1 Nov 2023 18:39:35 -0500 Subject: [PATCH 167/182] add print for when paramset exists --- element_facemap/facemap_train.py | 1 + 1 file changed, 1 insertion(+) diff --git a/element_facemap/facemap_train.py b/element_facemap/facemap_train.py index 43cf1a8..59a2549 100644 --- a/element_facemap/facemap_train.py +++ b/element_facemap/facemap_train.py @@ -195,6 +195,7 @@ def insert_new_params( if param_query: existing_paramset_idx = param_query.fetch1("paramset_idx") if existing_paramset_idx == int(paramset_idx): # If existing_idx same: + print(f"Paramset already exists at index {paramset_idx}") return # job done else: cls.insert1( From 90cae4469fa5fa242a9a212fa564000320503e51 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Wed, 1 Nov 2023 18:41:38 -0500 Subject: [PATCH 168/182] update refine_model_prefix in task insertion --- element_facemap/facemap_train.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/element_facemap/facemap_train.py b/element_facemap/facemap_train.py index 59a2549..90663af 100644 --- a/element_facemap/facemap_train.py +++ b/element_facemap/facemap_train.py @@ -258,7 +258,7 @@ def insert_facemap_training_task( cls, file_set_id, paramset_idx, - refined_model_name="refined_model", + 
refined_model_prefix="refined_model", model_description=None, retrain_model_id=None, selected_frame_ind=None, @@ -269,7 +269,7 @@ def insert_facemap_training_task( dict( **key, train_output_dir=inferred_output_dir.as_posix(), - refined_model_name=refined_model_name, + refined_model_prefix=refined_model_prefix, model_description=model_description, selected_frame_ind=selected_frame_ind, retrain_model_id=retrain_model_id, From 13a2fed82711dca35ce2a80056f700a1ad1e35b2 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Wed, 1 Nov 2023 18:56:15 -0500 Subject: [PATCH 169/182] fix typo --- element_facemap/facemap_train.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/element_facemap/facemap_train.py b/element_facemap/facemap_train.py index 90663af..30ceb8b 100644 --- a/element_facemap/facemap_train.py +++ b/element_facemap/facemap_train.py @@ -442,6 +442,6 @@ def make(self, key): dict( **key, retrain_model_id=model_id, - retrain_file=model_output_path, + retrain_model_file=model_output_path, ), ) From 940a65e02e7068fa8d9b7e581c0d28f7a060b742 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Fri, 3 Nov 2023 14:24:23 -0500 Subject: [PATCH 170/182] modify refined_model_prefix to be a prefix --- element_facemap/facemap_train.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/element_facemap/facemap_train.py b/element_facemap/facemap_train.py index 30ceb8b..35726fc 100644 --- a/element_facemap/facemap_train.py +++ b/element_facemap/facemap_train.py @@ -229,7 +229,7 @@ class FacemapModelTrainingTask(dj.Manual): --- train_output_dir : varchar(255) # Trained model output directory selected_frame_ind=null : blob # Optional, array of frames to run training on - refined_model_prefix='refined_model_' : varchar(128) # Specify prefix of finetuned/trained model filepath + refined_model_prefix='' : varchar(128) # Specify prefix of finetuned/trained model filepath -> [nullable]facemap_inference.FacemapModel.proj(retrain_model_id='model_id') # Specify retrain_model_id model_description=None : varchar(255) # Optional, model desc for insertion into FacemapModel """ @@ -258,7 +258,7 @@ def insert_facemap_training_task( cls, file_set_id, paramset_idx, - refined_model_prefix="refined_model", + refined_model_prefix="", model_description=None, retrain_model_id=None, selected_frame_ind=None, @@ -411,7 +411,8 @@ def make(self, key): ) # Save Refined Model - model_output_path = output_dir / f"{refined_model_prefix}.pth" + refined_model_name = f"{refined_model_prefix}_refined_model.pth" + model_output_path = output_dir / refined_model_name train_model.save_model(model_output_path) model_description = (FacemapModelTrainingTask & key).fetch1("model_description") @@ -424,7 +425,10 @@ def make(self, key): model_id = 0 facemap_inference.FacemapModel().insert_new_model( - model_id, refined_model_prefix, model_description, model_output_path + model_id, + f"{refined_model_prefix}_refined_model.pth", + model_description, + model_output_path, ) train_model_time = datetime.fromtimestamp( From 1c6db803006425bcd2e03cd82ba5ff9f5d93862a Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Fri, 3 Nov 2023 14:38:30 -0500 Subject: [PATCH 171/182] Update element_facemap/facemap_train.py Co-authored-by: Thinh Nguyen --- element_facemap/facemap_train.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/element_facemap/facemap_train.py b/element_facemap/facemap_train.py index 35726fc..1b9502a 100644 --- a/element_facemap/facemap_train.py +++ 
b/element_facemap/facemap_train.py @@ -231,7 +231,7 @@ class FacemapModelTrainingTask(dj.Manual): selected_frame_ind=null : blob # Optional, array of frames to run training on refined_model_prefix='' : varchar(128) # Specify prefix of finetuned/trained model filepath -> [nullable]facemap_inference.FacemapModel.proj(retrain_model_id='model_id') # Specify retrain_model_id - model_description=None : varchar(255) # Optional, model desc for insertion into FacemapModel + model_description='' : varchar(255) # Optional, model desc for insertion into FacemapModel """ def infer_output_dir(self, key, relative=True, mkdir=True): From 7b16366de0730dc0dd5a23772d5646b109b1f492 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Fri, 3 Nov 2023 14:38:38 -0500 Subject: [PATCH 172/182] Update element_facemap/facemap_train.py Co-authored-by: Thinh Nguyen --- element_facemap/facemap_train.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/element_facemap/facemap_train.py b/element_facemap/facemap_train.py index 1b9502a..fd7a3d1 100644 --- a/element_facemap/facemap_train.py +++ b/element_facemap/facemap_train.py @@ -228,7 +228,7 @@ class FacemapModelTrainingTask(dj.Manual): -> FacemapTrainParamSet # Initially specified ROIs --- train_output_dir : varchar(255) # Trained model output directory - selected_frame_ind=null : blob # Optional, array of frames to run training on + selected_frame_ind=null : blob # Optional, array of frame indices to run training on refined_model_prefix='' : varchar(128) # Specify prefix of finetuned/trained model filepath -> [nullable]facemap_inference.FacemapModel.proj(retrain_model_id='model_id') # Specify retrain_model_id model_description='' : varchar(255) # Optional, model desc for insertion into FacemapModel From dd1250db182caac360c34839f1925f60db057e5f Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Fri, 3 Nov 2023 14:40:17 -0500 Subject: [PATCH 173/182] modify infer_output_dir to be a class function --- element_facemap/facemap_train.py | 1 + 1 file changed, 1 insertion(+) diff --git a/element_facemap/facemap_train.py b/element_facemap/facemap_train.py index 35726fc..fecfd51 100644 --- a/element_facemap/facemap_train.py +++ b/element_facemap/facemap_train.py @@ -234,6 +234,7 @@ class FacemapModelTrainingTask(dj.Manual): model_description=None : varchar(255) # Optional, model desc for insertion into FacemapModel """ + @classmethod def infer_output_dir(self, key, relative=True, mkdir=True): video_file = (FacemapTrainFileSet.VideoFile & key).fetch( "video_file_path", limit=1 From 7bec3ddfc1686815622c6e264854fc4e9f1a5d23 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Fri, 3 Nov 2023 15:13:39 -0500 Subject: [PATCH 174/182] Update facemap_inference.py --- element_facemap/facemap_inference.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/element_facemap/facemap_inference.py b/element_facemap/facemap_inference.py index d72b195..414beb0 100644 --- a/element_facemap/facemap_inference.py +++ b/element_facemap/facemap_inference.py @@ -135,10 +135,10 @@ class FacemapModel(dj.Manual): """ definition = """ - model_id : int # user assigned ID associated with a unique model + model_id : int # user assigned ID associated with a unique model --- - model_name : varchar(64) # optional name/prefix of model - model_description: varchar(1000) # optional model description + model_name : varchar(64) # name of model + model_description='' : varchar(1000) # optional model description """ class BodyPart(dj.Part): From 
912e32a81e20451d5fe6df7edd74e9f36c4f31bc Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Fri, 3 Nov 2023 19:14:49 -0500 Subject: [PATCH 175/182] update refined_model_prefix -> refined_model_name (prefixed to output_dir.stem) --- element_facemap/facemap_train.py | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/element_facemap/facemap_train.py b/element_facemap/facemap_train.py index bc5139c..87660c5 100644 --- a/element_facemap/facemap_train.py +++ b/element_facemap/facemap_train.py @@ -218,7 +218,7 @@ class FacemapModelTrainingTask(dj.Manual): train_output_dir( varchar(255) ): Relative output directory for trained model selected_frame_ind (blob) : Array of frames to run training on, if not specified all frames used. refined_model_name ( varchar(32) ): Name for retrained model - retrain_model_id (smallint): Model index, of FacemapModel table, to be used for retraining + base_model_id (smallint): Model index, of FacemapModel table, to be used for retraining model_description ( varchar(255) ): Optional. Model Description for insertion into FacemapModel """ @@ -229,8 +229,8 @@ class FacemapModelTrainingTask(dj.Manual): --- train_output_dir : varchar(255) # Trained model output directory selected_frame_ind=null : blob # Optional, array of frame indices to run training on - refined_model_prefix='' : varchar(128) # Specify prefix of finetuned/trained model filepath - -> [nullable]facemap_inference.FacemapModel.proj(retrain_model_id='model_id') # Specify retrain_model_id + refined_model_name='' : varchar(128) # Specify name of finetuned/trained model filepath + -> [nullable]facemap_inference.FacemapModel.proj(base_model_id='model_id') # Specify base model to be retrained model_description='' : varchar(255) # Optional, model desc for insertion into FacemapModel """ @@ -261,7 +261,7 @@ def insert_facemap_training_task( paramset_idx, refined_model_prefix="", model_description=None, - retrain_model_id=None, + base_model_id=None, selected_frame_ind=None, ): key = {"file_set_id": file_set_id, "paramset_idx": paramset_idx} @@ -273,7 +273,7 @@ def insert_facemap_training_task( refined_model_prefix=refined_model_prefix, model_description=model_description, selected_frame_ind=selected_frame_ind, - retrain_model_id=retrain_model_id, + base_model_id=base_model_id, ), ) @@ -304,7 +304,7 @@ class RetrainedModelFile(dj.Part): definition = """ -> master - -> facemap_inference.FacemapModel.proj(retrain_model_id='model_id') # link to facemap model table + -> facemap_inference.FacemapModel.proj(base_model_id='model_id') # link to facemap model table --- retrain_model_file: attach # retrained model file attachment """ @@ -337,14 +337,14 @@ def make(self, key): # Create a pose model object, specifying the video files train_model = pose.Pose(filenames=[video_files]) # facemap expects list of list train_model.pose_prediction_setup() # Sets default facemap model as train_model.net, handles empty bbox - retrain_model_id = (FacemapModelTrainingTask & key).fetch1("retrain_model_id") + base_model_id = (FacemapModelTrainingTask & key).fetch1("base_model_id") if ( - retrain_model_id is not None + base_model_id is not None ): # Retrain an existing model from the facemap_inference.FacemapModel table # Fetch model file attachment so that model_file (.pth) is availible in Path.cwd() model_file = ( - facemap_inference.FacemapModel.File & {"model_id": retrain_model_id} + facemap_inference.FacemapModel.File & {"model_id": base_model_id} ).fetch1("model_file") # Set train_model 
object to load preexisting model @@ -396,8 +396,8 @@ def make(self, key): training_params = ( FacemapTrainParamSet & f'paramset_idx={key["paramset_idx"]}' ).fetch1("params") - refined_model_prefix = (FacemapModelTrainingTask & key).fetch1( - "refined_model_prefix" + refined_model_name = (FacemapModelTrainingTask & key).fetch1( + "refined_model_name" ) # default = "refined_model" # Train model using train function defined in Pose class @@ -412,7 +412,7 @@ def make(self, key): ) # Save Refined Model - refined_model_name = f"{refined_model_prefix}_refined_model.pth" + refined_model_name = f"{refined_model_name}_{output_dir.stem}_refined_model.pth" model_output_path = output_dir / refined_model_name train_model.save_model(model_output_path) @@ -427,7 +427,7 @@ def make(self, key): facemap_inference.FacemapModel().insert_new_model( model_id, - f"{refined_model_prefix}_refined_model.pth", + refined_model_name, model_description, model_output_path, ) @@ -446,7 +446,7 @@ def make(self, key): self.RetrainedModelFile.insert1( dict( **key, - retrain_model_id=model_id, + base_model_id=model_id, retrain_model_file=model_output_path, ), ) From 2d7af0fc8ca7a65363a54d207041dfdc3c0c0454 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Fri, 3 Nov 2023 19:18:30 -0500 Subject: [PATCH 176/182] train_output_dir -> training_output_dir --- element_facemap/facemap_train.py | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/element_facemap/facemap_train.py b/element_facemap/facemap_train.py index 87660c5..229f13b 100644 --- a/element_facemap/facemap_train.py +++ b/element_facemap/facemap_train.py @@ -215,7 +215,7 @@ class FacemapModelTrainingTask(dj.Manual): Attributes: FacemapTrainFileSet (foreign key): FacemapTrainFileSet Key. FacemapTrainParamSet (foreign key): TrainingParamSet key. - train_output_dir( varchar(255) ): Relative output directory for trained model + training_output_dir( varchar(255) ): Relative output directory for trained model selected_frame_ind (blob) : Array of frames to run training on, if not specified all frames used. 
refined_model_name ( varchar(32) ): Name for retrained model base_model_id (smallint): Model index, of FacemapModel table, to be used for retraining @@ -227,9 +227,9 @@ class FacemapModelTrainingTask(dj.Manual): -> FacemapTrainFileSet # video(s) and files for training -> FacemapTrainParamSet # Initially specified ROIs --- - train_output_dir : varchar(255) # Trained model output directory + training_output_dir : varchar(255) # Trained model output directory selected_frame_ind=null : blob # Optional, array of frame indices to run training on - refined_model_name='' : varchar(128) # Specify name of finetuned/trained model filepath + refined_model_name='refined_model' : varchar(128) # Specify name of finetuned/retrained model -> [nullable]facemap_inference.FacemapModel.proj(base_model_id='model_id') # Specify base model to be retrained model_description='' : varchar(255) # Optional, model desc for insertion into FacemapModel """ @@ -259,7 +259,7 @@ def insert_facemap_training_task( cls, file_set_id, paramset_idx, - refined_model_prefix="", + refined_model_name="refined_model", model_description=None, base_model_id=None, selected_frame_ind=None, @@ -269,8 +269,8 @@ def insert_facemap_training_task( cls.insert1( dict( **key, - train_output_dir=inferred_output_dir.as_posix(), - refined_model_prefix=refined_model_prefix, + training_output_dir=inferred_output_dir.as_posix(), + refined_model_name=refined_model_name, model_description=model_description, selected_frame_ind=selected_frame_ind, base_model_id=base_model_id, @@ -315,8 +315,10 @@ def make(self, key): import torch import cv2 - train_output_dir = (FacemapModelTrainingTask & key).fetch1("train_output_dir") - output_dir = find_full_path(get_facemap_root_data_dir(), train_output_dir) + training_output_dir = (FacemapModelTrainingTask & key).fetch1( + "training_output_dir" + ) + output_dir = find_full_path(get_facemap_root_data_dir(), training_output_dir) video_files = [ find_full_path(get_facemap_root_data_dir(), fp).as_posix() @@ -412,7 +414,7 @@ def make(self, key): ) # Save Refined Model - refined_model_name = f"{refined_model_name}_{output_dir.stem}_refined_model.pth" + refined_model_name = f"{refined_model_name}_{output_dir.stem}.pth" model_output_path = output_dir / refined_model_name train_model.save_model(model_output_path) From d600f32e1a6a6cf8e5d8cfd481e3423d501a2179 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Fri, 3 Nov 2023 19:18:59 -0500 Subject: [PATCH 177/182] model_description -> varchar(1000) --- element_facemap/facemap_train.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/element_facemap/facemap_train.py b/element_facemap/facemap_train.py index 229f13b..aeb4400 100644 --- a/element_facemap/facemap_train.py +++ b/element_facemap/facemap_train.py @@ -219,7 +219,7 @@ class FacemapModelTrainingTask(dj.Manual): selected_frame_ind (blob) : Array of frames to run training on, if not specified all frames used. refined_model_name ( varchar(32) ): Name for retrained model base_model_id (smallint): Model index, of FacemapModel table, to be used for retraining - model_description ( varchar(255) ): Optional. Model Description for insertion into FacemapModel + model_description ( varchar(1000) ): Optional. 
Model Description for insertion into FacemapModel """ @@ -231,7 +231,7 @@ class FacemapModelTrainingTask(dj.Manual): selected_frame_ind=null : blob # Optional, array of frame indices to run training on refined_model_name='refined_model' : varchar(128) # Specify name of finetuned/retrained model -> [nullable]facemap_inference.FacemapModel.proj(base_model_id='model_id') # Specify base model to be retrained - model_description='' : varchar(255) # Optional, model desc for insertion into FacemapModel + model_description='' : varchar(1000) # Optional, model desc for insertion into FacemapModel """ @classmethod From 4fd55b1d318ebeaa450cd0b13186b6e4312ed497 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Fri, 3 Nov 2023 19:19:54 -0500 Subject: [PATCH 178/182] =?UTF-8?q?facemap=5Finference.FacemapModel.proj(r?= =?UTF-8?q?etrain=5Fmodel=5Fid=3D=E2=80=98model=5Fid=E2=80=99)=20=20#=20li?= =?UTF-8?q?nk=20to=20facemap=20model=20table=20=20=20-->=20make=20into=20a?= =?UTF-8?q?=20secondary=20attr?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- element_facemap/facemap_train.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/element_facemap/facemap_train.py b/element_facemap/facemap_train.py index aeb4400..8078546 100644 --- a/element_facemap/facemap_train.py +++ b/element_facemap/facemap_train.py @@ -304,8 +304,8 @@ class RetrainedModelFile(dj.Part): definition = """ -> master - -> facemap_inference.FacemapModel.proj(base_model_id='model_id') # link to facemap model table --- + -> facemap_inference.FacemapModel.proj(base_model_id='model_id') # link to facemap model table retrain_model_file: attach # retrained model file attachment """ From 92e67fa08c447a0102c14210ae72587fa837aa2a Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Mon, 6 Nov 2023 12:30:29 -0800 Subject: [PATCH 179/182] modify design to make base_model_id required, with expectation that default model is intially inserted --- element_facemap/facemap_train.py | 30 ++++++++++++++---------------- 1 file changed, 14 insertions(+), 16 deletions(-) diff --git a/element_facemap/facemap_train.py b/element_facemap/facemap_train.py index 8078546..27a1f57 100644 --- a/element_facemap/facemap_train.py +++ b/element_facemap/facemap_train.py @@ -230,7 +230,7 @@ class FacemapModelTrainingTask(dj.Manual): training_output_dir : varchar(255) # Trained model output directory selected_frame_ind=null : blob # Optional, array of frame indices to run training on refined_model_name='refined_model' : varchar(128) # Specify name of finetuned/retrained model - -> [nullable]facemap_inference.FacemapModel.proj(base_model_id='model_id') # Specify base model to be retrained + -> facemap_inference.FacemapModel.proj(base_model_id='model_id') # Specify base model from FacemapModel to be retrained model_description='' : varchar(1000) # Optional, model desc for insertion into FacemapModel """ @@ -341,24 +341,22 @@ def make(self, key): train_model.pose_prediction_setup() # Sets default facemap model as train_model.net, handles empty bbox base_model_id = (FacemapModelTrainingTask & key).fetch1("base_model_id") - if ( - base_model_id is not None - ): # Retrain an existing model from the facemap_inference.FacemapModel table - # Fetch model file attachment so that model_file (.pth) is availible in Path.cwd() - model_file = ( - facemap_inference.FacemapModel.File & {"model_id": base_model_id} - ).fetch1("model_file") + # Retrain an existing model from the facemap_inference.FacemapModel table + # Fetch 
model file attachment so that model_file (.pth) is availible in Path.cwd() + model_file = ( + facemap_inference.FacemapModel.File & {"model_id": base_model_id} + ).fetch1("model_file") - # Set train_model object to load preexisting model - train_model.model_name = model_file + # Set train_model object to load preexisting model + train_model.model_name = model_file - # Overwrite default train_model.net - train_model.net.load_state_dict( - torch.load(model_file, map_location=train_model.device) - ) + # Overwrite default train_model.net + train_model.net.load_state_dict( + torch.load(model_file, map_location=train_model.device) + ) - # link model to torch device - train_model.net.to(train_model.device) + # link model to torch device + train_model.net.to(train_model.device) # Convert videos to images for train input pre_selected_frame_ind = (FacemapModelTrainingTask & key).fetch1( From 35b289456168a1b3f71f123d90b568a7d9440440 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Mon, 6 Nov 2023 15:42:28 -0800 Subject: [PATCH 180/182] remove unused model_id from insert_pose_estimation_task --- element_facemap/facemap_inference.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/element_facemap/facemap_inference.py b/element_facemap/facemap_inference.py index 414beb0..a942b3f 100644 --- a/element_facemap/facemap_inference.py +++ b/element_facemap/facemap_inference.py @@ -264,7 +264,6 @@ def infer_output_dir(cls, key, relative=True, mkdir=True): def generate( cls, key, - model_id: int, task_description: str = "", task_mode: str = "trigger", bbox: list = [], @@ -272,7 +271,6 @@ def generate( """Generate a unique pose estimation task for each of the relative_video_paths Args: - model_id (int): User Specified model identification number key (dict): Primary key from FacemapPoseEstimationTask table e.g.: {subject="sub1",session_id=0,recording_id=0,model_id=0} relative_video_paths (list): list of relative videos in VideoRecording.File table From 19d175a3c4e05029cdd597aa66c940b0de60fa82 Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Mon, 6 Nov 2023 15:45:05 -0800 Subject: [PATCH 181/182] update infer_output_dir to pull model_id from key --- element_facemap/facemap_inference.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/element_facemap/facemap_inference.py b/element_facemap/facemap_inference.py index a942b3f..2014912 100644 --- a/element_facemap/facemap_inference.py +++ b/element_facemap/facemap_inference.py @@ -247,12 +247,11 @@ def infer_output_dir(cls, key, relative=True, mkdir=True): video_dir = find_full_path(get_facemap_root_data_dir(), video_file).parent root_dir = find_root_directory(get_facemap_root_data_dir(), video_dir) - model_id = (FacemapPoseEstimationTask & key).fetch1("model_id") processed_dir = Path(get_facemap_processed_data_dir()) output_dir = ( processed_dir / video_dir.relative_to(root_dir) - / f"facemap_recordingid{key['recording_id']}_model{model_id}" + / f"facemap_recordingid{key['recording_id']}_model{key['model_id']}" ) if mkdir: From 6f6ca3a70db49b873208c8cc89afda55592d9a2b Mon Sep 17 00:00:00 2001 From: Sidharth Hulyalkar Date: Wed, 8 Nov 2023 11:37:26 -0800 Subject: [PATCH 182/182] remove training --- element_facemap/facemap_train.py | 452 ------------------------------- 1 file changed, 452 deletions(-) delete mode 100644 element_facemap/facemap_train.py diff --git a/element_facemap/facemap_train.py b/element_facemap/facemap_train.py deleted file mode 100644 index 27a1f57..0000000 --- a/element_facemap/facemap_train.py +++ /dev/null @@ 
-1,452 +0,0 @@ -import datajoint as dj -import inspect -import importlib -import os -from pathlib import Path -from datetime import datetime -import numpy as np -from element_interface.utils import find_full_path, dict_to_uuid, find_root_directory - -from . import facial_behavior_estimation as fbe -from . import facemap_inference -from .facial_behavior_estimation import ( - get_facemap_root_data_dir, - get_facemap_processed_data_dir, -) - -schema = dj.schema() -_linking_module = None - - -def activate( - facemap_train_schema_name: str, - fbe_schema_name: str = None, - facemap_inference_schema_name: str = None, - *, - create_schema: bool = True, - create_tables: bool = True, - linking_module: str = None, -): - """Activate this schema. - - Args: - facemap_train_schema_name (str): schema name on the database server to activate - the `facemap_train` schema of element-facemap - fbe_schema_name (str): Schema name on the database server to activate the 'facial_behavioral_estimation - facemap_inference_schema_name (str): Schema name on the database server to activate the - `facemap_inference` schema of element-facemap - create_schema (bool): when True (default), create schema in the database if it - does not yet exist. - create_tables (bool): when True (default), create schema tables in the database - if they do not yet exist.i - linking_module (str): a module (or name) containing the required dependencies. - - Dependencies: - Functions: - + get_facemap_root_data_dir() -> list - Retrieves the root data directory(s) with face recordings for all - subject/sessions. Returns a string for the full path to the root data directory. - + get_facemap_processed_data_dir(session_key: dict) -> str - Optional function to retrieve the desired output directory - for Facemap files for a given session. If unspecified, - the output is stored in the video folder for the session, which is the default behavior of Facemap. - Returns a string of the absolute path of the output directory. - - """ - - if isinstance(linking_module, str): - linking_module = importlib.import_module(linking_module) - assert inspect.ismodule( - linking_module - ), "The argument 'dependency' must be a module's name or a module" - assert hasattr( - linking_module, "get_facemap_root_data_dir" - ), "The linking module must specify a lookup function for a root data directory" - - global _linking_module - _linking_module = linking_module - - # activate facial behavioral extimation (fbe) schema - fbe.activate( - fbe_schema_name, - create_schema=create_schema, - create_tables=create_tables, - linking_module=linking_module, - ) - - # activate facemap inference schema - facemap_inference.activate( - facemap_inference_schema_name, - create_schema=create_schema, - create_tables=create_tables, - linking_module=linking_module, - ) - - # activate facemap train schema - schema.activate( - facemap_train_schema_name, - create_schema=create_schema, - create_tables=create_tables, - add_objects=_linking_module.__dict__, - ) - - -# ----------------------------- Table declarations ---------------------- - - -@schema -class FacemapTrainFileSet(dj.Manual): - """Collection of files associated with a given training set. - - Attributes: - file_set_id (int): Unique ID for each collection of training files.""" - - definition = """ # Set of vids in training set - file_set_id: int - """ - - class VideoFile(dj.Part): - """Video File IDs and paths in a given FacemapTrainFileSet - - Attributes: - FacemapTrainFileSet (foreign key) : FacemapTrainFileSet key. 
- video_file_id (int) : Video File index - video_file_path ( varchar(255) ) : Path to file on disk relative to root. - """ - - definition = """ # Paths of training files (e.g., .avi, .mp4 video files) - -> master - video_file_id: int - --- - video_file_path: varchar(255) - """ - - class KeypointsFile(dj.Part): - """Keypoints File and paths in a given FacemapTrainFileSet - - Attributes: - FacemapTrainFileSet (foreign key) : FacemapTrainFileSet key. - file_path ( varchar(255) ) : Path to file on disk relative to root. - """ - - definition = """ # Paths of training files (e.g.: .h5 keypoints data file) - -> master - --- - file_path: varchar(255) - """ - - -@schema -class FacemapTrainParamSet(dj.Lookup): - """Parameters used to train a model - - Attributes: - paramset_idx (smallint): Index uniqely identifying each paramset. - paramset_desc ( varchar(128) ): Description of paramset. - param_set_hash (uuid): Hash identifying this paramset. - params (longblob): Dictionary of all applicable parameters. - Note: param_set_hash must be unique.""" - - definition = """ - # Parameters to specify a facemap model training instance - paramset_idx : smallint - --- - paramset_desc : varchar(128) # Optional description of parameterset - param_set_hash : uuid # hash identifying this paramset - unique index (param_set_hash) - params : longblob # required model training parameters - """ - - required_parameters = ( - "weight_decay", - "bbox", - "learning_rate", - "epochs", - "batch_size", - ) - - @classmethod - def insert_new_params( - cls, paramset_desc: str, params: dict, paramset_idx: int = None - ): - """ - Insert a new set of training parameters into FacemapTrainParamSet. - - Args: - paramset_desc (str): Description of parameter set to be inserted - params (dict): Dictionary including all settings to specify model training. - paramset_idx (int): optional, integer to represent parameters. - """ - - for required_param in cls.required_parameters: - assert required_param in params, ( - "Missing required parameter: " + required_param - ) - - if paramset_idx is None: - paramset_idx = ( - dj.U().aggr(cls, n="max(paramset_idx)").fetch1("n") or 0 - ) + 1 - - paramset_hash = dict_to_uuid(params) # store to avoid recompute - param_query = cls & {"param_set_hash": paramset_hash} - - # If the specified param-set already exists - if param_query: - existing_paramset_idx = param_query.fetch1("paramset_idx") - if existing_paramset_idx == int(paramset_idx): # If existing_idx same: - print(f"Paramset already exists at index {paramset_idx}") - return # job done - else: - cls.insert1( - dict( - paramset_idx=paramset_idx, - paramset_desc=paramset_desc, - param_set_hash=paramset_hash, - params=params, - ), - ) # if duplicate, will raise duplicate error - - -@schema -class FacemapModelTrainingTask(dj.Manual): - """Staging table for pairing videosets and training parameter sets - - Attributes: - FacemapTrainFileSet (foreign key): FacemapTrainFileSet Key. - FacemapTrainParamSet (foreign key): TrainingParamSet key. - training_output_dir( varchar(255) ): Relative output directory for trained model - selected_frame_ind (blob) : Array of frames to run training on, if not specified all frames used. - refined_model_name ( varchar(32) ): Name for retrained model - base_model_id (smallint): Model index, of FacemapModel table, to be used for retraining - model_description ( varchar(1000) ): Optional. 
Model Description for insertion into FacemapModel - - """ - - definition = """ # Specification for a facemap model training instance - -> FacemapTrainFileSet # video(s) and files for training - -> FacemapTrainParamSet # Initially specified ROIs - --- - training_output_dir : varchar(255) # Trained model output directory - selected_frame_ind=null : blob # Optional, array of frame indices to run training on - refined_model_name='refined_model' : varchar(128) # Specify name of finetuned/retrained model - -> facemap_inference.FacemapModel.proj(base_model_id='model_id') # Specify base model from FacemapModel to be retrained - model_description='' : varchar(1000) # Optional, model desc for insertion into FacemapModel - """ - - @classmethod - def infer_output_dir(self, key, relative=True, mkdir=True): - video_file = (FacemapTrainFileSet.VideoFile & key).fetch( - "video_file_path", limit=1 - )[0] - video_dir = find_full_path(get_facemap_root_data_dir(), video_file).parent - root_dir = find_root_directory(get_facemap_root_data_dir(), video_dir) - - processed_dir = Path(get_facemap_processed_data_dir()) - output_dir = ( - processed_dir - / video_dir.relative_to(root_dir) - / f"facemap_train_fileset{key['file_set_id']}_paramset{key['paramset_idx']}" - ) - - if mkdir: - output_dir.mkdir(parents=True, exist_ok=True) - - return output_dir.relative_to(processed_dir) if relative else output_dir - - @classmethod - def insert_facemap_training_task( - cls, - file_set_id, - paramset_idx, - refined_model_name="refined_model", - model_description=None, - base_model_id=None, - selected_frame_ind=None, - ): - key = {"file_set_id": file_set_id, "paramset_idx": paramset_idx} - inferred_output_dir = cls().infer_output_dir(key, relative=True, mkdir=True) - cls.insert1( - dict( - **key, - training_output_dir=inferred_output_dir.as_posix(), - refined_model_name=refined_model_name, - model_description=model_description, - selected_frame_ind=selected_frame_ind, - base_model_id=base_model_id, - ), - ) - - -@schema -class FacemapModelTraining(dj.Computed): - """Automated Model training - - Attributes: - FacemapModelTrainingTask (foreign key): FacemapModelTrainingTask key. 
- train_model_time (datetime): Time of creation of newly trained model - facemap_model_reference (smallint): Reference to index of facemap_inference.FacemapModel - - """ - - definition = """ - -> FacemapModelTrainingTask - --- - train_model_time : datetime # Time of creation of train model file - """ - - class RetrainedModelFile(dj.Part): - """Stores newly trained models - - Attributes: - FacemapModelTraining (foreign key): - """ - - definition = """ - -> master - --- - -> facemap_inference.FacemapModel.proj(base_model_id='model_id') # link to facemap model table - retrain_model_file: attach # retrained model file attachment - """ - - def make(self, key): - from facemap.pose import pose - from facemap import utils - import torch - import cv2 - - training_output_dir = (FacemapModelTrainingTask & key).fetch1( - "training_output_dir" - ) - output_dir = find_full_path(get_facemap_root_data_dir(), training_output_dir) - - video_files = [ - find_full_path(get_facemap_root_data_dir(), fp).as_posix() - for fp in (FacemapTrainFileSet.VideoFile & key).fetch("video_file_path") - ] - - # manually specified .h5 keypoints file - keypoints_file = [ - find_full_path(get_facemap_root_data_dir(), fp).as_posix() - for fp in (FacemapTrainFileSet.KeypointsFile & key).fetch("file_path") - ] - - if len(keypoints_file) > 0: - keypoints_file = keypoints_file[ - 0 - ] # if multiple keypoints files are specified, select first file - - # Create a pose model object, specifying the video files - train_model = pose.Pose(filenames=[video_files]) # facemap expects list of list - train_model.pose_prediction_setup() # Sets default facemap model as train_model.net, handles empty bbox - base_model_id = (FacemapModelTrainingTask & key).fetch1("base_model_id") - - # Retrain an existing model from the facemap_inference.FacemapModel table - # Fetch model file attachment so that model_file (.pth) is availible in Path.cwd() - model_file = ( - facemap_inference.FacemapModel.File & {"model_id": base_model_id} - ).fetch1("model_file") - - # Set train_model object to load preexisting model - train_model.model_name = model_file - - # Overwrite default train_model.net - train_model.net.load_state_dict( - torch.load(model_file, map_location=train_model.device) - ) - - # link model to torch device - train_model.net.to(train_model.device) - - # Convert videos to images for train input - pre_selected_frame_ind = (FacemapModelTrainingTask & key).fetch1( - "selected_frame_ind" - ) - - # Currently, only support single video training - assert len(video_files) == 1 - video_file = video_files[0] - - # Load video capture to iterate through frames and convert to grayscale - cap = cv2.VideoCapture(video_file) - if len(pre_selected_frame_ind) == 0: # set selected frames to all frames - selected_frame_indices = np.arange(int(cap.get(cv2.CAP_PROP_FRAME_COUNT))) - else: - selected_frame_indices = pre_selected_frame_ind - frames = [] - for frame_ind in selected_frame_indices: - if int(cap.get(cv2.CAP_PROP_POS_FRAMES)) != frame_ind: - cap.set(cv2.CAP_PROP_POS_FRAMES, frame_ind) - ret, frame = cap.read() - gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) - if ret: - frames.append(gray_frame) - else: - print("Error reading frame") - image_data = np.array(frames) - - # Load image frames from video - # image_data = utils.load_images_from_video(video_file, selected_frame_ind) - - keypoints_data = utils.load_keypoints( - list(zip(*facemap_inference.BodyPart.contents))[0], keypoints_file - ) - - # Model Parameters (fetch from TrainingParamSet as dict) - 
training_params = ( - FacemapTrainParamSet & f'paramset_idx={key["paramset_idx"]}' - ).fetch1("params") - refined_model_name = (FacemapModelTrainingTask & key).fetch1( - "refined_model_name" - ) # default = "refined_model" - - # Train model using train function defined in Pose class - train_model.train( - image_data, - keypoints_data.T, # needs to be transposed - int(training_params["epochs"]), - int(training_params["batch_size"]), - float(training_params["learning_rate"]), - int(training_params["weight_decay"]), - bbox=training_params["bbox"], - ) - - # Save Refined Model - refined_model_name = f"{refined_model_name}_{output_dir.stem}.pth" - model_output_path = output_dir / refined_model_name - train_model.save_model(model_output_path) - - model_description = (FacemapModelTrainingTask & key).fetch1("model_description") - - # Insert newly trained model results into FacemapModel table - try: - model_ids = facemap_inference.FacemapModel.fetch("model_id") - model_id = max(model_ids) + 1 - except ValueError: # case that nothing has been inserted - model_id = 0 - - facemap_inference.FacemapModel().insert_new_model( - model_id, - refined_model_name, - model_description, - model_output_path, - ) - - train_model_time = datetime.fromtimestamp( - model_output_path.stat().st_mtime - ).strftime("%Y-%m-%d %H:%M:%S") - - self.insert1( - dict( - **key, - train_model_time=train_model_time, - ) - ) - - self.RetrainedModelFile.insert1( - dict( - **key, - base_model_id=model_id, - retrain_model_file=model_output_path, - ), - )
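With the standalone training schema dropped in the final patch, model registration and pose inference are handled entirely through the `facemap_inference` schema. Below is a minimal, hedged sketch of how a downstream workflow might drive that schema after this series. It assumes the element has already been activated through the user's linking module, that a video recording entry exists for the session, and that `generate` is a classmethod whose full signature may accept more arguments than the ones visible in these patches. The model file path, key values, and description strings are placeholders; the positional arguments to `insert_new_model` mirror the call that appeared in the removed training code.

    from element_facemap import facemap_inference

    # Register a pose model so that inference tasks can reference it by model_id.
    # Arguments are positional, mirroring the insert_new_model call used by the
    # (now removed) training pipeline; all values below are placeholders.
    facemap_inference.FacemapModel().insert_new_model(
        0,                                        # model_id: user assigned ID
        "facemap_model_state.pth",                # model_name
        "Default Facemap keypoint model",         # model_description (optional)
        "/data/models/facemap_model_state.pth",   # path to the .pth model file
    )

    # Queue pose inference for one recording. After PATCH 180/181, model_id is
    # no longer a separate argument to generate(); it travels inside the key and
    # is also used to build the output directory name.
    pose_key = dict(
        subject="sub1",     # placeholder primary-key values for the recording
        session_id=0,
        recording_id=0,
        model_id=0,
    )
    facemap_inference.FacemapPoseEstimationTask.generate(
        pose_key,
        task_description="Facemap pose inference on the face camera",
        task_mode="trigger",   # trigger Facemap rather than load existing results
        bbox=[],               # empty bbox lets Facemap estimate the crop
    )

    # The downstream Computed table (name assumed) can then be populated:
    # facemap_inference.FacemapPoseEstimation.populate(pose_key, display_progress=True)

Because `infer_output_dir` now derives the folder name from `key['model_id']` (facemap_recordingid{recording_id}_model{model_id}), running inference on the same recording with a different registered model writes to a separate output directory rather than overwriting earlier results.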