XGBOOST_PD.py
import pandas as pd
import numpy as np
from sklearn.model_selection import KFold
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import accuracy_score, confusion_matrix, f1_score, precision_score, recall_score
from operator import itemgetter
import xgboost as xgb
# The CSV carries an extra header row: after read_csv, row 0 of df still holds the
# per-column feature names, so the actual data starts at iloc[1:, ...].
df = pd.read_csv('./pd_speech_features.csv')

label = pd.Series(df.iloc[1:, -1].values, name=df.iloc[0, -1]).astype(np.int8)
patientId = pd.Series(df.iloc[1:, 0].values, name=df.iloc[0, 0]).astype(np.int16)
gender = pd.Series(df.iloc[1:, 1].values, name=df.iloc[0, 1]).astype(np.int8)

# Split the remaining columns into the dataset's named feature groups.
baselineFeats = pd.DataFrame(
    df.iloc[1:, 2:23].values, columns=df.iloc[0, 2:23]).astype(np.float64)
intensityFeats = pd.DataFrame(
    df.iloc[1:, 23:26].values, columns=df.iloc[0, 23:26]).astype(np.float64)
formantFeats = pd.DataFrame(
    df.iloc[1:, 26:30].values, columns=df.iloc[0, 26:30]).astype(np.float64)
bandwidthFeats = pd.DataFrame(
    df.iloc[1:, 30:34].values, columns=df.iloc[0, 30:34]).astype(np.float64)
vocalFeats = pd.DataFrame(
    df.iloc[1:, 34:56].values, columns=df.iloc[0, 34:56]).astype(np.float64)
mfccFeats = pd.DataFrame(
    df.iloc[1:, 56:140].values, columns=df.iloc[0, 56:140]).astype(np.float64)
waveletFeats = pd.DataFrame(
    df.iloc[1:, 140:322].values, columns=df.iloc[0, 140:322]).astype(np.float64)
tqwtFeats = pd.DataFrame(
    df.iloc[1:, 322:-1].values, columns=df.iloc[0, 322:-1]).astype(np.float64)

data = {"patientId": patientId,
        "gender": gender,
        "baselineFeats": baselineFeats,
        "intensityFeats": intensityFeats,
        "formantFeats": formantFeats,
        "bandwidthFeats": bandwidthFeats,
        "vocalFeats": vocalFeats,
        "mfccFeats": mfccFeats,
        "waveletFeats": waveletFeats,
        "tqwtFeats": tqwtFeats,
        "label": label}

# Feature groups concatenated into the model input (patientId is excluded).
features = ["gender", "baselineFeats", "intensityFeats", "formantFeats",
            "bandwidthFeats", "vocalFeats", "mfccFeats", "waveletFeats", "tqwtFeats"]
def convert_data(data, features):
    """Concatenate the selected feature groups column-wise into a single array."""
    if len(features) == 1:
        return data[features[0]]
    return pd.concat(itemgetter(*features)(data), axis=1).values
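

# Example usage (hypothetical, mirroring the call inside XGBOOST_PD below):
#     X_sub = convert_data(data, ["mfccFeats", "waveletFeats"])
# which yields a 2-D NumPy array holding only the MFCC and wavelet column blocks.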
def XGBOOST_PD(params):
    """Run one k-fold cross-validation of an XGBoost classifier on the selected features."""
    X = convert_data(data, features)
    y = data['label']
    folds = KFold(n_splits=params['number_of_folds'], shuffle=True)
    scores = {'accuracy': [], 'fscore': [], 'precision': [], 'recall': []}
    conf_mats = []
    for train_idx, test_idx in folds.split(X):
        x_train, x_test = X[train_idx], X[test_idx]
        y_train, y_test = y.iloc[train_idx], y.iloc[test_idx]
        # Fit the scaler on the training fold only, so the test fold does not leak into it.
        scaler = MinMaxScaler()
        x_train = scaler.fit_transform(x_train)
        x_test = scaler.transform(x_test)
        xgb_cls = xgb.XGBClassifier(colsample_bytree=params['colsample_bytree'],
                                    learning_rate=params['learning_rate'],
                                    max_depth=params['max_depth'],
                                    n_estimators=params['n_estimators'],
                                    subsample=params['subsample'],
                                    gamma=params['gamma'])
        xgb_cls.fit(x_train, y_train)
        # predict() already returns class labels, so no rounding is needed.
        y_pred = xgb_cls.predict(x_test)
        scores['accuracy'].append(accuracy_score(y_test, y_pred))
        scores['fscore'].append(f1_score(y_test, y_pred))
        scores['precision'].append(precision_score(y_test, y_pred))
        scores['recall'].append(recall_score(y_test, y_pred))
        conf_mats.append(confusion_matrix(y_test, y_pred))
    return [scores, conf_mats]
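

# A minimal sketch (not part of the original script): the per-fold confusion
# matrices returned above can be collapsed into a single matrix for the whole
# cross-validation run, e.g.
#     scores, conf_mats = XGBOOST_PD(params)
#     total_conf = np.sum(conf_mats, axis=0)  # element-wise sum across folds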
def running_XGBOOST(params):
    """Repeat the cross-validation several times and report the best and average accuracy."""
    best_result = 0
    best_fscore = 0
    best_confs = []
    total = 0
    number_of_runs = params['number_of_runs']
    for i in range(number_of_runs):
        scores, confs = XGBOOST_PD(params)
        accuracy = 100 * np.mean(scores['accuracy'])
        fscore = 100 * np.mean(scores['fscore'])
        total += accuracy
        if accuracy > best_result:
            best_result = accuracy
            best_fscore = fscore
            best_confs = confs
        print(f"Run {i + 1} completed...")
    average = total / number_of_runs
    print("******************")
    print(f"ACCURACY: {best_result:.2f}")
    print(f"F_SCORE: {best_fscore:.2f}")
    print(f"Average accuracy over the {number_of_runs} runs: {average:.2f}")
    print("******************")
    print("CONFUSION MATRICES:")
    for conf in best_confs:
        print(conf)
    print("******************")
    print(f"Number of folds: {params['number_of_folds']}")
    print(f"Number of runs: {number_of_runs}")
# Hyper-parameter configuration for the experiment.
params = {'number_of_folds': 5,
          'number_of_runs': 500,
          'colsample_bytree': 0.35,
          'subsample': 0.75,
          'learning_rate': 0.1,
          'max_depth': 4,
          'n_estimators': 325,
          'gamma': 0.2}

if __name__ == "__main__":
    running_XGBOOST(params)
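
# Hypothetical variation (not part of the original experiment): to evaluate only a
# subset of the feature groups, override the global `features` list before running:
#     features = ["baselineFeats", "mfccFeats"]
#     running_XGBOOST(params)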