-
Notifications
You must be signed in to change notification settings - Fork 1
/
Copy pathLocalization.py
72 lines (53 loc) · 2.8 KB
/
Localization.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
import cv2
import numpy as np
from sklearn.neighbors import NearestNeighbors
import open3d
import os.path
import utilities as utl
from Resection import quaternion
from asift import asift_main
from config import fx, fy, cx, cy, x0, y0, k1, k2, p1, p2, accept_error
from config import point_cloud_path, rover_images_path, ortho_image_path
# Module-level setup: geo-referencing of the ortho-image, the point cloud as an
# (N, 3) array, and the list of rover image paths, loaded once at import time.
geo_transform = utl.get_geotransform(ortho_image_path)
cloud = np.asarray(open3d.io.read_point_cloud(point_cloud_path).points)
images = [os.path.join(rover_images_path, name) for name in os.listdir(rover_images_path)]
# Reuse the list built above instead of a second os.listdir() call: avoids
# redundant filesystem I/O and a race if the directory changes between calls.
image_count = len(images)
# The following undistorting process only needs to be executed once
"""
for distort_image in images:
    utl.image_undistort(distort_image, fx, fy, cx, cy, k1, k2, p1, p2, distort_image)
"""
def localization(manual_override=False):
    """Localize every rover station against the UAV-generated ortho-image.

    For each rover image, matched keypoint pairs (automatic ASIFT matching,
    or manual picking when *manual_override* is True) are converted into
    image-space / object-space coordinate pairs and fed to a quaternion-based
    space resection; the planimetric (X, Y) part of each solution is kept.

    Parameters
    ----------
    manual_override : bool
        When True, keypoints are picked by hand via
        ``utl.manual_pick_keypoints`` instead of ASIFT matching.

    Returns
    -------
    numpy.ndarray
        Array of shape (image_count, 2) with the object-space (X, Y)
        position of each station, in station order.
    """
    positions = np.zeros((image_count, 2))
    with utl.Timer("Building KDTree ..."):
        # Nearest-neighbour index over the planimetric (X, Y) columns of the cloud.
        kd_tree = NearestNeighbors(n_neighbors=1, n_jobs=-1)
        kd_tree.fit(cloud[:, 0: 2])
    for index in range(1, image_count + 1):
        print(f"Station {index}")
        # Build the station image path from the configured rover_images_path
        # instead of a hard-coded absolute user path, so the script is portable.
        # NOTE(review): assumes rover images are named "<index>.png" — confirm.
        rover_image = os.path.join(rover_images_path, f"{index}.png")
        # Affine-SIFT matching, automatic mode
        if not manual_override:
            kp_pairs = asift_main(rover_image, ortho_image_path)
            query, base = utl.convert_keypoint(kp_pairs)
        else:
            query, base = utl.manual_pick_keypoints(rover_image, ortho_image_path, index)
        data_size = len(query)
        # Transform pixel coordinates to image space coordinates
        # (shift by the principal point; image y axis points up, pixel row down).
        image_space_coordinates = np.hstack((query[:, 0].reshape((data_size, 1)) - cx,
                                             cy - query[:, 1].reshape((data_size, 1))))
        # Query object space coordinate from point cloud (note the (row, col)
        # swap: base keypoints are (x, y) but the lookup expects (row, col)).
        object_space_coordinates = utl.get_XYZ_value(
            np.hstack((base[:, 1].reshape((data_size, 1)),
                       base[:, 0].reshape((data_size, 1)))),
            geo_transform, cloud, kd_tree)
        # Perform space resection
        # Please notice that the indexing convention for resection method is (col, row) or (x, y)
        current_localization_result = quaternion(image_space_coordinates,
                                                 object_space_coordinates,
                                                 (fx + fy) * 0.5, x0, y0,
                                                 accept_error).flatten()
        # Keep only the planimetric (X, Y) part of the resection solution.
        positions[index - 1] = current_localization_result[0: 2]
    if __name__ == '__main__':
        print(positions)
    return positions
# Guard the pipeline so importing this module does not trigger the full
# localization run, file writes, and visualization as side effects.
if __name__ == '__main__':
    # Invoke localization method and acquire object space positions
    positions = localization(manual_override=False)
    # Transform object space coordinates back to image pixel coordinate of UAV-generated map
    base_image_coordinates = utl.inverse_geotransform(positions, geo_transform)
    np.save('result.npy', base_image_coordinates)
    print("Coordinates saved!")
    # Visualize localization result
    base_image = cv2.imread(ortho_image_path)
    utl.visualize_localization(base_image, base_image_coordinates)