-
Notifications
You must be signed in to change notification settings - Fork 2
/
Copy pathtest_model.py
113 lines (90 loc) · 3.8 KB
/
test_model.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
from tqdm import tqdm
import numpy as np
import torch
from uae_explore import encode_with_uae
import json
from ego_trajectory_encoder import EgoTrajectoryEncoder
from npz_trajectory import NpzTrajectory
from trajectory_encoder_dataset import TrajectoryEncoderDataset
from torch.utils.data import DataLoader
def compute_similarities(
    model_path: str = "/home/pmueller/llama_traffic/models/trajectory_encoder_wv_mae.pth",
    trajectory_path: str = "/storage_local/fzi_datasets_tmp/waymo_open_motion_dataset/unzipped/train-2e6/vehicle_d_13657_00002_4856147881.npz",
) -> dict:
    """Encode one trajectory and score it against every cached bucket embedding.

    Loads the trajectory encoder checkpoint, encodes the (X, Y) coordinate
    sequence from the given .npz trajectory, and computes the dot product
    between each bucket embedding in ``datasets/uae_buckets_cache.json`` and
    the encoder output. The raw encoder output is also dumped to
    ``output/test.txt`` for inspection.

    Args:
        model_path: Path to the trained ``EgoTrajectoryEncoder`` state dict.
        trajectory_path: Path to the .npz trajectory file to encode.

    Returns:
        Dict mapping bucket name -> similarity score (``np.dot`` of the
        bucket embedding with the transposed encoder output).
    """
    model = EgoTrajectoryEncoder()
    # Fix: load the checkpoint given by the caller. Previously a hard-coded
    # path ("trajectory_encoder.pth") was loaded and model_path was ignored.
    model.load_state_dict(torch.load(model_path))
    model.eval()

    # Fix: use the trajectory_path parameter instead of a hard-coded path.
    npz_trajectory = NpzTrajectory(trajectory_path)
    coordinates = list(
        zip(npz_trajectory.coordinates["X"], npz_trajectory.coordinates["Y"])
    )
    # Add a leading batch dimension: the encoder expects (batch, seq, 2).
    # NOTE(review): shape assumption inferred from unsqueeze(0) in the
    # original — confirm against EgoTrajectoryEncoder.forward.
    coordinates = torch.Tensor(coordinates).unsqueeze(0)

    with open("output/test.txt", "w") as file:
        with torch.no_grad():
            output = model(coordinates)
        # "full" profile so the dump is not elided with "..." by torch.
        torch.set_printoptions(profile="full")
        file.write(str(output))
    print(output.shape)

    with open("datasets/uae_buckets_cache.json") as cache:
        loaded_cache = json.load(cache)

    similarities = {}
    for bucket, embedding in loaded_cache.items():
        similarities[bucket] = np.dot(np.array(embedding), output.T)

    print(similarities)
    return similarities
# sims = compute_similarities()
# print(max(sims, key=sims.get))
def benchmark_model_retrieval():
    """Map each encoded trajectory to its best-matching direction bucket.

    For every trajectory listed in ``output/test_3.json``, looks up its
    encoder embedding in ``datasets/encoder_output_vehicle_a_cos.json``,
    computes the dot-product similarity against every bucket embedding
    cached in ``datasets/uae_buckets_cache.json``, and records the
    highest-scoring bucket. Predictions are written to
    ``datasets/encoder_preds_vehicle_a_cos.json`` in the form::

        {"vehicle_d_13657_00002_4856147881.npz": "Straight",
         "vehicle_d_13657_00032_4852345321.npz": "Right-U-Turn",
         ...}
    """
    preds = {}

    with open("datasets/encoder_output_vehicle_a_cos.json") as enc_output:
        enc_output_data = json.load(enc_output)

    with open("output/test_3.json") as processed:
        processed_data = json.load(processed)

    # Keys in test_3.json are full paths; the encoder output is keyed by
    # the bare file name, so strip the directory prefix once, up front.
    # (Previously list(enc_output_keys) was rebuilt on every loop
    # iteration, making the loop quadratic.)
    enc_output_keys = [key.split("/")[-1] for key in processed_data.keys()]

    with open("datasets/uae_buckets_cache.json") as cache:
        cache_data = json.load(cache)

    for key in tqdm(enc_output_keys):
        encoder_embedding = np.array(enc_output_data[key])
        # Build a fresh score dict per trajectory so no stale bucket
        # scores can leak between iterations.
        similarities = {
            bucket: np.dot(np.array(embedding), encoder_embedding.T)
            for bucket, embedding in cache_data.items()
        }
        preds[key] = max(similarities, key=similarities.get)

    with open("datasets/encoder_preds_vehicle_a_cos.json", "w") as output:
        json.dump(preds, output, indent=4)
    print("finished")
benchmark_model_retrieval()