-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathidecutils.py
105 lines (89 loc) · 3.21 KB
/
idecutils.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
# -*- coding: utf-8 -*-
#
# Copyright © dawnranger.
#
# 2018-05-08 10:15 <[email protected]>
#
# Distributed under terms of the MIT license.
from __future__ import division, print_function
import numpy as np
import torch
from torch.utils.data import Dataset
from sklearn.metrics import normalized_mutual_info_score,f1_score, adjusted_rand_score, cluster,accuracy_score,precision_score,recall_score
from munkres import Munkres
# Short aliases for the sklearn metrics used by downstream evaluation code.
pre = precision_score
rec = recall_score
Fscore = f1_score
def load_mnist(path='./data/mnist.npz'):
    """Load MNIST, merge train+test, flatten images and scale to [0, 1].

    Args:
        path: location of an ``mnist.npz`` archive containing the keys
            ``x_train``, ``y_train``, ``x_test``, ``y_test``.

    Returns:
        (x, y): ``float32`` features of shape ``(n_samples, n_pixels)`` with
        values in [0, 1], and ``int32`` labels of shape ``(n_samples,)``.
    """
    # Context manager guarantees the npz file handle is closed even if an
    # exception is raised while reading (the original leaked on error paths).
    with np.load(path) as f:
        x = np.concatenate((f['x_train'], f['x_test']))
        y = np.concatenate((f['y_train'], f['y_test'])).astype(np.int32)
    x = x.reshape((x.shape[0], -1)).astype(np.float32)
    x = np.divide(x, 255.)
    print('MNIST samples', x.shape)
    return x, y
class MnistDataset(Dataset):
    """Torch Dataset over the flattened MNIST arrays from load_mnist()."""

    def __init__(self):
        # Eagerly load the whole dataset into memory: features and labels.
        self.x, self.y = load_mnist()

    def __len__(self):
        # One sample per row of the feature matrix.
        return len(self.x)

    def __getitem__(self, idx):
        # Return (feature, label, index) as tensors; the index lets callers
        # match predictions back to the originating sample.
        feature = torch.from_numpy(np.array(self.x[idx]))
        label = torch.from_numpy(np.array(self.y[idx]))
        position = torch.from_numpy(np.array(idx))
        return feature, label, position
#######################################################
# Evaluate Critiron
#######################################################
def cluster_acc(y_true, y_pred):
    """
    Calculate clustering accuracy under the best one-to-one label mapping.

    The Hungarian algorithm finds the permutation of predicted cluster ids
    that maximises agreement with the ground-truth labels.

    # Arguments
        y_true: true labels, numpy.array with shape `(n_samples,)`
        y_pred: predicted labels, numpy.array with shape `(n_samples,)`
    # Return
        accuracy, in [0,1]
    """
    y_true = y_true.astype(np.int64)
    assert y_pred.size == y_true.size
    D = max(y_pred.max(), y_true.max()) + 1
    # w[i, j] = number of samples placed in cluster i whose true label is j.
    w = np.zeros((D, D), dtype=np.int64)
    for i in range(y_pred.size):
        w[y_pred[i], y_true[i]] += 1
    # sklearn.utils.linear_assignment_ was removed in scikit-learn 0.23;
    # scipy.optimize.linear_sum_assignment is the maintained equivalent.
    # Minimising (w.max() - w) maximises the total matched count.
    from scipy.optimize import linear_sum_assignment
    row_ind, col_ind = linear_sum_assignment(w.max() - w)
    return w[row_ind, col_ind].sum() * 1.0 / y_pred.size
def best_map(L1, L2):
    """Relabel clustering L2 so it best matches the ground truth L1.

    Builds the overlap matrix between ground-truth labels and cluster ids,
    solves the optimal one-to-one assignment (Hungarian algorithm), and
    rewrites L2 using the matched ground-truth labels.

    Args:
        L1: ground-truth labels, shape (n_samples,).
        L2: predicted cluster labels, shape (n_samples,).

    Returns:
        numpy array with L2's shape holding the remapped (float) labels.
    """
    # scipy's Hungarian solver replaces the third-party `munkres` package;
    # on -G.T it produces the same maximal-overlap assignment.
    from scipy.optimize import linear_sum_assignment

    Label1 = np.unique(L1)
    nClass1 = len(Label1)
    Label2 = np.unique(L2)
    nClass2 = len(Label2)
    nClass = np.maximum(nClass1, nClass2)
    # G[i, j] = number of samples with true label Label1[i] in cluster Label2[j];
    # padded with zeros to a square matrix so the assignment is well defined.
    G = np.zeros((nClass, nClass))
    for i in range(nClass1):
        ind_cla1 = (L1 == Label1[i]).astype(float)
        for j in range(nClass2):
            ind_cla2 = (L2 == Label2[j]).astype(float)
            G[i, j] = np.sum(ind_cla2 * ind_cla1)
    # Rows of G.T are clusters, columns are true labels; minimising -G.T
    # maximises the total overlap. row_ind comes back sorted 0..nClass-1,
    # so col_ind[k] is the true-label index assigned to cluster k.
    row_ind, col_ind = linear_sum_assignment(-G.T)
    c = col_ind
    # NOTE(review): as in the original, if there are more clusters than true
    # labels, c[i] may index past Label1 and raise — callers appear to pass
    # matching label sets; confirm before generalising.
    newL2 = np.zeros(L2.shape)
    for i in range(nClass2):
        newL2[L2 == Label2[i]] = Label1[c[i]]
    return newL2
def acc_rate(gt_s, s):
    """Accuracy of clustering `s` against ground truth `gt_s` after best_map."""
    remapped = best_map(gt_s, s)
    # Count samples whose remapped cluster label equals the ground truth,
    # then normalise by the number of samples.
    n_correct = np.sum(gt_s[:] == remapped[:])
    return n_correct.astype(float) / gt_s.shape[0]
def purity_score(y_true, y_pred):
    """Purity: fraction of samples in the dominant true class of their cluster."""
    # Contingency (confusion) matrix between true labels and cluster labels.
    cm = cluster.contingency_matrix(y_true, y_pred)
    # Take each cluster's (column's) dominant class count, then normalise
    # by the total number of samples.
    return np.amax(cm, axis=0).sum() / cm.sum()