forked from andresantonioriveros/pyRF
-
Notifications
You must be signed in to change notification settings - Fork 0
/
find_fit.py
106 lines (75 loc) · 3.4 KB
/
find_fit.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
# coding=utf-8
# Script used to find the best decision-tree / random-forest fit among several
# combinations of hyper-parameters (grid over min_samples_split and max_depth),
# evaluated with stratified k-fold cross-validation. One result file is written
# per parameter combination.
# -------------------------------------------------------------------------------------------------
import itertools
import argparse
import sys

from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn import cross_validation

import pandas as pd

import metrics

if __name__ == '__main__':
    # Echo the full invocation so it ends up in any captured log.
    print(' '.join(sys.argv))

    # Command-line parameters
    parser = argparse.ArgumentParser()
    parser.add_argument('--n_processes', required=True, type=int)
    parser.add_argument('--folds', required=True, type=int)
    parser.add_argument('--model', default='sktree', choices=['rf', 'sktree'])
    parser.add_argument('--catalog', default='MACHO', choices=['MACHO', 'EROS', 'OGLE'])
    parser.add_argument('--n_samples', required=False, default=100, type=int)
    parser.add_argument('--sets_path', required=True, type=str)
    parser.add_argument('--result_dir', required=True, type=str)
    parser.add_argument('--feature_filter', nargs='*', type=str)

    args = parser.parse_args(sys.argv[1:])

    n_processes = args.n_processes
    folds = args.folds
    model = args.model
    catalog = args.catalog
    n_samples = args.n_samples
    # NOTE(review): sets_path and feature_filter are parsed but never used below;
    # the dataset path is hard-coded — presumably it should come from --sets_path.
    sets_path = args.sets_path
    result_dir = args.result_dir
    feature_filter = args.feature_filter

    # Hyper-parameter grid to try
    min_samples_splits = range(2, 20, 2)
    max_depths = range(8, 16, 2)
    params = list(itertools.product(min_samples_splits, max_depths))

    # The dataset is identical for every parameter combination: load and clean
    # it ONCE, outside the grid loop, instead of re-reading the CSV each time.
    # TODO confirm: this should probably be derived from --sets_path/--catalog.
    # path = '/Users/npcastro/workspace/Features/sets/MACHO/Macho regular set 40.csv'
    path = '/Users/npcastro/workspace/Features/sets/EROS/EROS regular set 40.csv'
    data = pd.read_csv(path)
    data = data.dropna(axis=0, how='any')
    y = data['class']
    data = data.drop('class', axis=1)

    for min_samples_split, max_depth in params:
        skf = cross_validation.StratifiedKFold(y, n_folds=folds)

        results = []
        count = 1
        for train_index, test_index in skf:
            print('Fold: ' + str(count))
            count += 1
            train_X, test_X = data.iloc[train_index], data.iloc[test_index]
            train_y, test_y = y.iloc[train_index], y.iloc[test_index]

            # Bug fix: the original referenced an undefined variable ``p`` for
            # n_estimators and hard-coded max_depth=14 / min_samples_split=20,
            # which made the grid loop a no-op. Use the grid values, honor the
            # --model choice (DecisionTreeClassifier was imported but unused),
            # and parallelize the forest with --n_processes.
            if model == 'rf':
                # n_samples is presumed to be the number of trees here — the
                # only integer "size" knob the CLI exposes; TODO confirm.
                clf = RandomForestClassifier(n_estimators=n_samples,
                                             criterion='entropy',
                                             max_depth=max_depth,
                                             min_samples_split=min_samples_split,
                                             n_jobs=n_processes)
            else:
                clf = DecisionTreeClassifier(criterion='entropy',
                                             max_depth=max_depth,
                                             min_samples_split=min_samples_split)

            clf.fit(train_X, train_y)
            results.append(metrics.predict_table(clf, test_X, test_y))

        result = pd.concat(results)

        matrix = metrics.confusion_matrix(result)

        clases = matrix.columns.tolist()
        f_scores = [metrics.f_score(matrix, c) for c in clases]
        w_score = metrics.weighted_f_score(matrix)

        # One output file per (max_depth, min_samples_split) combination —
        # the original used the undefined ``p`` in the name and would have
        # overwritten a single file on every iteration. ``with`` guarantees
        # the handle is closed even if a write fails.
        out_name = result_dir + str(max_depth) + ' ' + str(min_samples_split) + '.txt'
        with open(out_name, 'w') as f:
            f.write('F_score by class')
            f.write('\n')
            f.write(str(f_scores))
            f.write('\n')
            f.write('\n')
            f.write('Weighted average: ')
            f.write(str(w_score))