-
Notifications
You must be signed in to change notification settings - Fork 0
/
svm.py
122 lines (94 loc) · 4.53 KB
/
svm.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
# coding=gbk
import pandas as pd
import numpy as np
from imblearn.over_sampling import SMOTE
from sklearn.linear_model import SGDClassifier
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn import metrics
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer, TfidfVectorizer
from sklearn.model_selection import GridSearchCV, StratifiedKFold, train_test_split
from sklearn.preprocessing import FunctionTransformer
# Structured metadata columns (student ID, gender, origin, overall score, and
# per-indicator subscores) that are dropped from the CSV before training —
# presumably leaving only the free-text field ('text') and the label
# ('可能问题'); verify against student_data.csv's schema.
columns = ['学号', '性别', '生源地', '总分', '幻觉、妄想症状', '自杀意图', '焦虑指标总分', '抑郁指标总分', '偏执指标总分', '自卑指标总分',
'敏感指标总分', '社交恐惧指标总分', '躯体化指标总分', '依赖指标总分', '敌对攻击指标总分', '冲动指标总分', '强迫指标总分',
'网络成瘾指标总分', '自伤行为指标总分', '进食问题指标总分', '睡眠困扰指标总分', '学校适应困难指标总分', '人际关系困扰指标总分',
'学业压力指标总分', '就业压力指标总分', '恋爱困扰指标总分']
def standard_svm():
    """Train and evaluate a text-only linear classifier (SGD, hinge loss).

    Reads 'student_data.csv', drops the structured metadata columns, splits
    80/20, and grid-searches a CountVectorizer -> TfidfTransformer ->
    SGDClassifier pipeline over the n-gram range and regularization alpha,
    refitting the best model on macro-F1.  Prints a classification report,
    confusion matrix, and weighted summary metrics on the held-out split.
    """
    # encoding='utf-8' added for consistency with metadata_svm_fu, which
    # reads the same file — NOTE(review): confirm the CSV is actually UTF-8.
    data = pd.read_csv('student_data.csv', encoding='utf-8')
    data.drop(columns=columns, inplace=True)
    train_df, test_df = train_test_split(data, test_size=0.2, random_state=11)
    X_train = train_df['text'].tolist()
    X_test = test_df['text'].tolist()
    y_train = train_df['可能问题'].tolist()
    y_test = test_df['可能问题'].tolist()
    pipeline = Pipeline([('vect', CountVectorizer()),
                         ('tfidf', TfidfTransformer()),
                         ('clf', SGDClassifier()),
                         ])
    parameters = {
        'vect__ngram_range': [(1, 2), (1, 3)],
        'clf__alpha': (1e-4, 1e-6)
    }
    # Training config: 5-fold stratified CV, track accuracy and macro-F1,
    # refit the winning parameter set on macro-F1.
    kfold = StratifiedKFold(n_splits=5)
    scoring = {'Accuracy': 'accuracy', 'F1': 'f1_macro'}
    refit = 'F1'
    gs_clf = GridSearchCV(pipeline, parameters, n_jobs=1, verbose=1, cv=kfold, scoring=scoring, refit=refit)
    gs_clf.fit(X_train, y_train)
    predicted = gs_clf.predict(X_test)
    print(metrics.classification_report(y_test, predicted))
    print(metrics.confusion_matrix(y_test, predicted))
    # print() stringifies its arguments itself; redundant str() calls removed.
    print("precision: ", metrics.precision_score(y_test, predicted, average='weighted'))
    print("accuracy: ", metrics.accuracy_score(y_test, predicted))
    print("F1 score: ", metrics.f1_score(y_test, predicted, average='weighted'))
    print("recall: ", metrics.recall_score(y_test, predicted, average='weighted'))
# Create Function Transformer to use Feature Union
def get_numeric_data(x):
    """Return every column of *x* except the last two as a NumPy array."""
    numeric_part = x.iloc[:, :-2]
    return np.asarray(numeric_part)
def get_text_data(x):
    """Return the 'text' column of *x* as a plain Python list."""
    return list(x['text'])
def metadata_svm_fu():
    """Train and evaluate a classifier that fuses text TF-IDF with metadata.

    Reads 'student_data.csv', drops the listed metadata columns, splits
    80/20, and builds a FeatureUnion that concatenates (a) the remaining
    numeric columns (all but the last two, via get_numeric_data) and (b) a
    TF-IDF vectorization of the 'text' column.  An SGDClassifier is grid-
    searched over alpha, n-gram range, and IDF usage with 5-fold stratified
    CV, refit on macro-F1.  Prints a classification report, confusion
    matrix, and weighted summary metrics on the held-out split.
    """
    data = pd.read_csv('student_data.csv', encoding='utf-8')
    data.drop(columns=columns, inplace=True)
    train_df, test_df = train_test_split(data, test_size=0.2, random_state=8)
    y_train = train_df['可能问题'].tolist()
    y_test = test_df['可能问题'].tolist()
    transformer_numeric = FunctionTransformer(get_numeric_data)
    transformer_text = FunctionTransformer(get_text_data)
    # Pipeline concatenating the TF-IDF vector with the numeric metadata;
    # SGDClassifier's default hinge loss makes this a linear SVM.
    pipeline = Pipeline([
        ('metadata', FeatureUnion([
            ('numeric_feature', Pipeline([
                ('selector', transformer_numeric)
            ])),
            ('text_features', Pipeline([
                ('selector', transformer_text),
                ('vec', TfidfVectorizer())
            ]))
        ])),
        ('clf', SGDClassifier())
    ])
    # Grid-search parameters for the SGDClassifier and the text vectorizer.
    parameters = {
        'clf__alpha': (1e-4, 1e-6),
        'metadata__text_features__vec__ngram_range': [(1, 2), (1, 3)],
        'metadata__text_features__vec__use_idf': [True, False]
    }
    # Training config: 5-fold stratified CV, refit best model on macro-F1.
    kfold = StratifiedKFold(n_splits=5)
    scoring = {'Accuracy': 'accuracy', 'F1': 'f1_macro'}
    refit = 'F1'
    gs_clf = GridSearchCV(pipeline, parameters, n_jobs=1, verbose=1, cv=kfold, scoring=scoring, refit=refit)
    # The whole DataFrame is passed in; the FunctionTransformers select the
    # numeric and text parts inside the pipeline.
    gs_clf.fit(train_df, y_train)
    predicted = gs_clf.predict(test_df)
    print(metrics.classification_report(y_test, predicted))
    print(metrics.confusion_matrix(y_test, predicted))
    # print() stringifies its arguments itself; redundant str() calls removed.
    print("precision: ", metrics.precision_score(y_test, predicted, average='weighted'))
    print("accuracy: ", metrics.accuracy_score(y_test, predicted))
    print("F1 score: ", metrics.f1_score(y_test, predicted, average='weighted'))
    print("recall: ", metrics.recall_score(y_test, predicted, average='weighted'))
if __name__ == '__main__':
    # Entry point: runs the FeatureUnion variant (text TF-IDF + numeric
    # metadata) by default; uncomment standard_svm() for the text-only baseline.
    metadata_svm_fu()
    # standard_svm()