Comment1 #3

Open: wants to merge 7 commits into base: main
17 changes: 17 additions & 0 deletions .vscode/launch.json
@@ -0,0 +1,17 @@
{
// Use IntelliSense to learn about possible attributes.
// Hover to view descriptions of existing attributes.
// For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
"version": "0.2.0",
"configurations": [
{
"name": "/root/anaconda3/envs/doppelgangers/bin/python",
"type": "python",
"request": "launch",
"program": "/root/workspace/code/ex1/DALF_CVPR_2023/train.py",
"console": "integratedTerminal",
"justMyCode": true,
"args": ["--mode", "ts1", "--gpu", "4,5,6,7"]
}
]
}
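
For reference, the configuration above is equivalent to running

python /root/workspace/code/ex1/DALF_CVPR_2023/train.py --mode ts1 --gpu 4,5,6,7

from the integrated terminal. A minimal, hypothetical sketch of how such arguments could be consumed (the actual parser in train.py is not part of this diff):

import argparse

# Hypothetical sketch only; train.py's real CLI may differ.
parser = argparse.ArgumentParser(description='DALF training (sketch)')
parser.add_argument('--mode', type=str, default='ts1', help='training mode, e.g. ts1')
parser.add_argument('--gpu', type=str, default='0', help='comma-separated GPU ids, e.g. 4,5,6,7')
args = parser.parse_args()

gpu_ids = [int(g) for g in args.gpu.split(',')]  # [4, 5, 6, 7] for the launch config above
print('mode:', args.mode, 'gpus:', gpu_ids)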
Binary file added assets/1.jpg
Binary file added assets/2.jpg
Binary file added assets/3.jpg
Binary file added assets/4.jpg
Binary file added assets/5.jpg
Binary file added assets/98.jpg
Binary file added eval/SimulationICCV_MS.dict
Binary file not shown.
30 changes: 18 additions & 12 deletions eval/dalf_benchmark.py
@@ -19,7 +19,7 @@

import distmat

modules = os.path.dirname(os.path.realpath(__file__)) + '/..'
modules = os.path.dirname(os.path.realpath(__file__)) + '/..' # parent directory of the directory containing this file
sys.path.insert(0, modules)

try:
@@ -35,7 +35,7 @@ def warn(*args, **kwargs):
experiment_name = ''
exp_dir_target = ''


# Create the directory if it does not exist
def check_dir(f):
if not os.path.exists(f):
os.makedirs(f)
@@ -65,13 +65,13 @@ def parseArg():

return args


# Mark keypoints at invalid positions as not valid
def correct_cadar_csv(csv):
for line in csv:
if line['x'] < 0 or line['y'] < 0:
line['valid'] = 0


# Extract keypoints at valid positions from the CSV
def gen_keypoints_from_csv(csv):
keypoints = []
for line in csv:
@@ -82,14 +82,16 @@ def gen_keypoints_from_csv(csv):

return keypoints


# Read the directory list from a file
def get_dir_list(filename):
with open(filename,'r') as f:
dirs = [line.rstrip('\n').rstrip() for line in f if line.rstrip('\n').rstrip()]
dirs = [line.rstrip('\n').rstrip() for line in f if line.rstrip('\n').rstrip()] # non-empty lines, stripped of newlines and trailing whitespace, become the directory entries
return dirs or False

# Compute the distances between two sets of keypoints (ref_kps and tgt_kps) via their descriptors (ref_descriptors and descriptors)
def save_dist_matrix(ref_kps, ref_descriptors, ref_gt, tgt_kps, descriptors, tgt_gt, out_fname):
#np.linalg.norm(a-b)
# Initialize a matrix for the distance values, filled with -1 by default
print ('saving matrix in:', out_fname)
size = len(ref_gt)
dist_mat = np.full((size,size),-1.0,dtype = np.float32)
@@ -99,36 +101,39 @@ def save_dist_matrix(ref_kps, ref_descriptors, ref_gt, tgt_kps, descriptors, tgt
matching_sum = 0

begin = time.time()

# Iterate over both sets of keypoints and their descriptors
for m in range(len(ref_kps)):
i = ref_kps[m].class_id
# Check that the keypoints are valid
if ref_gt[i]['valid'] and tgt_gt[i]['valid']:
valid_m+=1
for n in range(len(tgt_kps)):
j = tgt_kps[n].class_id
# Check that the keypoints are valid
if ref_gt[i]['valid'] and tgt_gt[i]['valid'] and tgt_gt[j]['valid']:
# Compute the distance between the descriptors and store it in dist_mat
dist_mat[i,j] = np.linalg.norm(ref_descriptors[m]-descriptors[n]) #distance.euclidean(ref_d,tgt_d) #np.linalg.norm(ref_d-tgt_d)

print('Time to match NRLFeat: %.3f'%(time.time() - begin))

# Find the column index of the minimum value in each row
mins = np.argmin(np.where(dist_mat >= 0, dist_mat, 65000), axis=1)
# Count the matches. TODO: this logic looks questionable
for i,j in enumerate(mins):
if i==j and ref_gt[i]['valid'] and tgt_gt[i]['valid']:
matches+=1

print ('--- MATCHES --- %d/%d'%(matches,valid_m))

# Write the distance matrix to a file
with open(out_fname, 'w') as f:

f.write('%d %d\n'%(size,size))

for i in range(dist_mat.shape[0]):
for j in range(dist_mat.shape[1]):
f.write('%.8f '%(dist_mat[i,j]))


# TODO: compare the correspondences between the two keypoint sets (kp and kp_gt)
def get_gt_idx(kp, kp_gt):
kp_dict = {}
# Build a dictionary {k.pt: idx}
for idx, k in enumerate(kp):
kp_dict['%.2f,%.2f'%(k.pt[0],k.pt[1])] = idx

@@ -140,6 +145,7 @@ def get_gt_idx(kp, kp_gt):

return gt_idx

# TODO: walk the image datasets in the given directory and run DALF feature extraction and matching on the specified datasets
def run_benchmark(args):

dev = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
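
The comments added to save_dist_matrix above describe the matching step: distances are written into a matrix indexed by keypoint class_id, the row-wise argmin picks each reference keypoint's nearest neighbour, and a match is counted when that neighbour has the same index. A minimal NumPy sketch of that counting logic (illustrative names, not from the repository), assuming dist_mat is already filled as in the diff:

import numpy as np

def count_nn_matches(dist_mat, valid_mask):
    # dist_mat: (N, N) descriptor distances indexed by class_id, -1 where unset
    # valid_mask: (N,) boolean, True where the ground-truth keypoint is valid
    masked = np.where(dist_mat >= 0, dist_mat, 65000)  # push unset entries out of the argmin
    nearest = np.argmin(masked, axis=1)                # column of each row's minimum
    idx = np.arange(dist_mat.shape[0])
    return int(np.sum((nearest == idx) & valid_mask))  # nearest neighbour has the same class_id

The i == j test only measures correctness because both axes are indexed by class_id (the loop writes dist_mat[i, j], not dist_mat[m, n]); the TODO in the translated comment flags this counting step as worth revisiting.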
4 changes: 4 additions & 0 deletions eval/distmat.py
@@ -1,6 +1,7 @@
import numpy as np
import cv2

# Compute the Euclidean distance matrix between two sets of descriptors and save it to a file
def save(desc_ref, desc_tgt, filename):
desc_ref = np.array(desc_ref)
desc_tgt = np.array(desc_tgt)
@@ -31,6 +32,7 @@ def save(desc_ref, desc_tgt, filename):

f.write('\n')

# Compute the Euclidean distances between two sets of descriptors and save the values to a file.
def save_cvnorm(desc_ref, desc_tgt, filename):
desc_ref = np.array(desc_ref)
desc_tgt = np.array(desc_tgt)
@@ -49,6 +51,7 @@ def save_cvnorm(desc_ref, desc_tgt, filename):

f.write('\n')

# Load keypoint information from a CSV file and build a list of OpenCV KeyPoint objects.
def load_cv_kps(csv):
keypoints = []
for line in csv:
@@ -57,6 +60,7 @@ def load_cv_kps(csv):

return keypoints

# Save desc to a local file
def save_desc(filename, desc):
m, n = desc.shape
with open(filename, 'w') as f:
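
The comments added to distmat.py describe computing Euclidean distances between every reference and target descriptor and writing them to disk. For comparison, a vectorized sketch of the same pairwise distance matrix (illustrative only; the functions in the file write the values out with explicit loops):

import numpy as np

def pairwise_euclidean(desc_ref, desc_tgt):
    # desc_ref: (M, D), desc_tgt: (N, D) -> (M, N) Euclidean distance matrix
    desc_ref = np.asarray(desc_ref, dtype=np.float32)
    desc_tgt = np.asarray(desc_tgt, dtype=np.float32)
    diff = desc_ref[:, None, :] - desc_tgt[None, :, :]
    return np.linalg.norm(diff, axis=-1)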
6 changes: 3 additions & 3 deletions eval/eval_nonrigid.sh
@@ -1,9 +1,9 @@
#Please put here the path of images and TPS files from nonrigid benchmark
PATH_IMGS='/srv/storage/datasets/nonrigiddataset/eval_bench/All_PNG'
PATH_TPS='/homeLocal/guipotje/sshfs/datasets/gt_tps'
PATH_IMGS='/root/workspace/code/ex1/DALF_CVPR_2023/dataset/eval/image/Kinect1/Bag1'
PATH_TPS='/root/workspace/code/ex1/DALF_CVPR_2023/dataset/eval/tps/Kinect1/Bag1'

#Set working dir to save results. Please change
working_dir='/tmp/nonrigid_eval'
working_dir='/root/workspace/code/ex1/DALF_CVPR_2023/result'
mkdir -p $working_dir

#############################################################################
6 changes: 6 additions & 0 deletions eval/mean.txt
@@ -0,0 +1,6 @@
0.10368644825861915
0.1264971258431803
0.46426299884445216
0.3475967964285022
0.24007131081249222
0.10698793112228643
42 changes: 42 additions & 0 deletions eval/new1.sh
@@ -0,0 +1,42 @@
#Please put here the path of images and TPS files from nonrigid benchmark
PATH_IMGS='/root/workspace/data/DALF/eval/image'
PATH_TPS='/root/workspace/data/DALF/eval/tps'

#Set working dir to save results. Please change
working_dir='/root/workspace/code/ex1/DALF_CVPR_2023/result'
mkdir -p $working_dir

#############################################################################

#Scripts Path
extract_gt_path='./extract_gt.py'
benchmark_path='./dalf_benchmark.py'
metrics_path='./plotUnorderedPR.py'

#For final eval
ablation='model_ts1_80000_final'

#Data Path
network_path='/root/workspace/code/ex1/DALF_CVPR_2023/weights/model_ts1_80000_final.pth'

#Original TPS files
tps_dir_o=$PATH_TPS
#Local copy of TPS files
tps_dir=$working_dir'/gt_tps_'$ablation

#Output path
out_path=$working_dir'/out_'$ablation

echo 'copying original gt_tps '$tps_dir_o' to '$tps_dir
cp -rf $tps_dir_o $tps_dir
python3 $extract_gt_path -i $PATH_IMGS --tps_dir $tps_dir --dir -m pgdeal --net_path $network_path
python3 $benchmark_path -i $PATH_IMGS -o $out_path --dir --sift --tps_path $tps_dir --net_path $network_path
#Remove old results cache
rm *.dict
#Show metric results
inputdir=$out_path
#Metric type: [MS, MMA, inliers]
metric=MS
# python3 $metrics_path -i $inputdir/Kinect1 -d --tps_path $tps_dir --mode erase --metric $metric
# python3 $metrics_path -i $inputdir/SimulationICCV -d --tps_path $tps_dir --mode erase --metric $metric
# python3 $metrics_path -i $inputdir/SimulationICCV -d --tps_path $tps_dir --mode append --metric $metric --gmean
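
With the variable assignments above, new1.sh copies the ground-truth TPS files into the working directory, runs extract_gt.py and dalf_benchmark.py, and leaves its results at (paths expanded from the script's variables):

gt_tps copy:       /root/workspace/code/ex1/DALF_CVPR_2023/result/gt_tps_model_ts1_80000_final
benchmark output:  /root/workspace/code/ex1/DALF_CVPR_2023/result/out_model_ts1_80000_final (one subdirectory per dataset, e.g. SimulationICCV or Kinect1)

new2.sh below skips extraction and benchmarking and only re-runs the metric step (plotUnorderedPR.py) on that output directory.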
39 changes: 39 additions & 0 deletions eval/new2.sh
@@ -0,0 +1,39 @@
#Please put here the path of images and TPS files from nonrigid benchmark
PATH_IMGS='/root/workspace/data/DALF/eval/image'
PATH_TPS='/root/workspace/data/DALF/eval/tps'

#Set working dir to save results. Please change
working_dir='/root/workspace/code/ex1/DALF_CVPR_2023/result'

#############################################################################

#Scripts Path
extract_gt_path='./extract_gt.py'
benchmark_path='./dalf_benchmark.py'
metrics_path='./plotUnorderedPR.py'

#For final eval
ablation='model_ts1_80000_final'

#Data Path
network_path='/root/workspace/code/ex1/DALF_CVPR_2023/weights/model_ts1_80000_final.pth'

#Original TPS files
tps_dir_o=$PATH_TPS
#Local copy of TPS files
tps_dir=$working_dir'/gt_tps_'$ablation

#Output path
out_path=$working_dir'/out_'$ablation

echo 'copying original gt_tps '$tps_dir_o' to '$tps_dir
#Remove old results cache
rm *.dict
#Show metric results
inputdir=$out_path
#Metric type: [MS, MMA, inliers]
metric=MS
# python3 $metrics_path -i $inputdir/Kinect1 -d --tps_path $tps_dir --mode erase --metric $metric
# python3 $metrics_path -i $inputdir/Kinect1 -d --tps_path $tps_dir --mode erase --metric $metric --gmean
python3 $metrics_path -i $inputdir/SimulationICCV -d --tps_path $tps_dir --mode erase --metric $metric
python3 $metrics_path -i $inputdir/SimulationICCV -d --tps_path $tps_dir --mode append --metric $metric --gmean
7 changes: 7 additions & 0 deletions eval/table.txt
@@ -0,0 +1,7 @@
& \multicolumn{1}{l}{kanagawa\_rot ($18$)}& $\underline{0.10}$ \\
& \multicolumn{1}{l}{lascaux\_rot ($18$)}& $\underline{0.13}$ \\
& \multicolumn{1}{l}{lascaux\_scale ($3$)}& $\underline{0.46}$ \\
& \multicolumn{1}{l}{kanagawa\_scale ($3$)}& $\underline{0.35}$ \\
& \multicolumn{1}{l}{chambre\_scale ($3$)}& $\underline{0.24}$ \\
& \multicolumn{1}{l}{chambre\_rot ($18$)}& $\underline{0.11}$ \\

2 changes: 2 additions & 0 deletions eval/table_means.txt
@@ -0,0 +1,2 @@
& dalf
& MS & $0.23$
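
The six values in eval/mean.txt above appear to be the per-sequence MS scores reported in eval/table.txt (they round to 0.10, 0.13, 0.46, 0.35, 0.24 and 0.11), and their arithmetic mean matches the $0.23$ in this table. A quick check, assuming that is how the summary value was obtained:

scores = [0.10368644825861915, 0.1264971258431803, 0.46426299884445216,
          0.3475967964285022, 0.24007131081249222, 0.10698793112228643]
print(round(sum(scores) / len(scores), 2))  # 0.23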