read_data.py
# coding=utf-8
import pandas as pd
import re
def data_preprocessing(df, DROP_THRESHOLD=None, mean_recond=None, by_category=False, redundancy_process=False, check=False,
zero_equal_na=False, augment=False, remove_correlation=False):
    # Filling missing values separately for the train and test sets is not yet supported under the by_category option
    assert mean_recond is None if by_category else True
    # The routine that treats 0 as a missing-value marker still needs improvement
    #assert not zero_equal_na
def __identify_categorical_variable(df):
        # Identify the tool (categorical) columns; the regex match is kept for reference,
        # but a hard-coded list of the known tool columns is returned instead
tool_mark = re.compile(r'[A-Za-z]+_?[A-Za-z]+.*')
categorical_columns = filter(lambda x: re.match(tool_mark, x), df.columns)
#return categorical_columns
return ['TOOL','Tool','TOOL_ID','Tool (#1)','TOOL (#1)','TOOL (#2)','Tool (#2)','Tool (#3)','Tool (#4)','OPERATION_ID',
'Tool (#5)','TOOL (#3)']
    def __identify_date(df):
        # Identify date-like columns (values such as 20170101...) so they can be dropped
        date_column_drop = []
        for index, i in enumerate(df.columns):
            try:
                if re.match(r'20[01]\d[01]\d[0-3]\d+', str(long(df[i][df[i].notnull()].iloc[0]))):
                    # if str(long(df.iloc[0, index])).startswith('2017'):
                    date_column_drop.append(i)
            except ValueError:
                pass
        return date_column_drop
def __create_nan(df, median_thd=0.2):
        # If 0 is the minimum of a column whose median is above a threshold (or the maximum of a
        # column whose median is below minus the threshold), treat those 0 entries as missing values
import numpy as np
lower_bool = df.apply(lambda x: x.min() == 0 and x.median() > median_thd)
df.loc[:, df.columns[lower_bool]] = df.loc[:, df.columns[lower_bool]].replace(0, np.nan)
upper_bool = df.apply(lambda x: x.max() == 0 and x.median() < -median_thd)
df.loc[:, df.columns[upper_bool]] = df.loc[:, df.columns[upper_bool]].replace(0, np.nan)
return df
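    # Illustrative example (assumption, not drawn from the competition data): for a column such as
    # [0.0, 0.31, 0.29, 0.35], the minimum is 0 and the median (~0.30) exceeds median_thd=0.2,
    # so the 0.0 would be read as a sensor dropout and replaced with NaN by __create_nan.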
def _data_augmentation(df):
        # Append the difference of adjacent columns as new features, provided the two columns
        # are on the same order of magnitude
augmented_df = df
for i in xrange(df.shape[1] - 1):
first_col = df.iloc[:, i]
last_col = df.iloc[:,i + 1]
if last_col.mean() / first_col.mean() > 10 or last_col.mean() / first_col.mean() < 0.1:
continue
else:
augmented_df = pd.concat(
[augmented_df, pd.Series(last_col - first_col, name=last_col.name + '-' + first_col.name)],
axis=1)
return augmented_df
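    # Illustrative example (assumption): adjacent columns with means 1.0 and 1.5 are within one
    # order of magnitude, so a new column named '<col2>-<col1>' holding their row-wise difference
    # is appended; a pair with means 1.0 and 50.0 fails the ratio check and is skipped.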
if zero_equal_na:
df = __create_nan(df)
if mean_recond is None:
print 'Train_data preprocessing...'
print df.shape
        # df is the training data at this point
        # Drop columns that are entirely NaN
df.dropna(axis=1, how='all', inplace=True)
print df.shape
        # Record the categorical (tool) columns
categorical_columns = __identify_categorical_variable(df)
if check:
df.loc[:,categorical_columns].to_csv('explore/check_categorical.csv')
        # Drop the date columns
date_columns = __identify_date(df)
if check:
df.loc[:,date_columns].to_csv('explore/check_date.csv')
df.drop(labels=date_columns, axis=1, inplace=True)
print df.shape
        # Drop features whose mode accounts for more than DROP_THRESHOLD of their non-null values,
        # skipping the categorical columns
column_drop = []
for i in set(df.columns)-set(categorical_columns):
if df[i][df[i] == df[i].mode()[0]].shape[0] >= DROP_THRESHOLD * (df.shape[0]-df[i].isnull().sum()):
column_drop.append(i)
if check:
df.loc[:,column_drop].to_csv('explore/check_threshold.csv')
df = df.drop(labels=column_drop, axis=1)
print df.shape
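        # Illustrative example (assumption): with DROP_THRESHOLD = 0.95, a column in which 97% of
        # the non-null entries equal its mode is considered near-constant and dropped here.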
        if not by_category:
            # Fill missing values with the training-set means
            mean_recond = df.mean()
            df = df.fillna(mean_recond)
            # Alternative: fill with the mode instead
            # df.fillna(df.mode().iloc[0], inplace=True)
            return df, mean_recond
else:
import categorical_processing
            # Split the features into blocks, one per tool (categorical) column
feature_dict = categorical_processing.feature_subgrouping(df, categorical_columns)
final_df = pd.DataFrame(index=df.index)
redundancy_dict = {}
for category in categorical_columns:
partial_df = categorical_processing.chunk_dataframe_generator(df, feature_dict, category)
                # Fill missing values with the median over rows that share the same tool value
                partial_df = partial_df.groupby(category).apply(lambda x: x.fillna(x.median())).reset_index(level=0, drop=True)
                # Any NaN left after the group-wise fill means the feature was entirely missing
                # for some tool value, so drop that feature
                partial_df = partial_df.dropna(axis=1, how='any')
                #partial_df = partial_df.fillna(0)
if augment:
                    # Use the difference of adjacent features as new features
partial_df = pd.concat([partial_df[category], _data_augmentation(partial_df.iloc[:, 1:])], axis=1)
if remove_correlation:
                    # Apply the chosen treatment to adjacent features whose correlation exceeds the threshold
from dimensionality_reduction import correlation_remove
partial_df = correlation_remove(partial_df, threshold=0.85, method='remove')
if redundancy_process:
                    # Remove redundant dimensions within the same tool block
from dimensionality_reduction import redundancy_processing
partial_df, result_dict = redundancy_processing(partial_df)
redundancy_dict.update(result_dict)
if check:
result_df = pd.DataFrame()
for col in result_dict.itervalues():
result_df = pd.concat([result_df, df.loc[:, col]], axis=1)
result_df.to_csv('explore/check_redundancy.csv')
final_df = pd.concat([final_df, partial_df], axis=1)
return final_df
else:
print 'Test_data preprocessing...'
        # df is the test data at this point
        # Fill missing values with the means recorded from the full training sample
        df = df.fillna(mean_recond)
        return df
def data_split(data,mode=None,DROP_THRESHOLD=None,by_category=False,redundancy_process=False, check=False,
zero_equal_na=False, augment=False, remove_correlation=False):
def _data_duplicate_process(data):
        # When two records share the same index, append a suffix to the second one
data['temp_id'] = data.index
data.index = map(lambda x, y: x + '_new' if y else x, data.index, data.duplicated(subset='temp_id'))
data['temp_id'] = data.index
assert data.duplicated(subset='temp_id').sum() == 0
data.drop(labels=['temp_id'], axis=1, inplace=True)
return data
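    # Illustrative example (assumption): a duplicated index such as ['ID001', 'ID001'] becomes
    # ['ID001', 'ID001_new'], so every row keeps a unique label through the joint preprocessing.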
# Train data and test data split
assert mode in ['online','offline']
    print 'Data splitting... in %s mode' % mode
if mode == 'offline':
        # Offline evaluation: hold out part of the training data as a test set
SPLIT_PTG = 0.2
import random
random.seed(49)
index = range(data.shape[0])
random.shuffle(index)
        # Preprocess the training and test data together, then split them
data = _data_duplicate_process(data)
data = data_preprocessing(data, DROP_THRESHOLD=DROP_THRESHOLD, redundancy_process=redundancy_process, by_category=by_category,
zero_equal_na=zero_equal_na, augment=augment, remove_correlation=remove_correlation, check=check)
test_data = data.iloc[index[:int(SPLIT_PTG * data.shape[0])]]
test_score = test_data.Value
test_data = test_data.drop(labels='Value', axis=1)
train_data = data.iloc[index[int(SPLIT_PTG * data.shape[0]):]]
train_score = train_data.Value
train_data = train_data.drop(labels='Value', axis=1)
assert train_data.shape[1] == test_data.shape[1]
print train_data.shape, test_data.shape
if check:
train_data.to_csv('explore/train_data_for_explore.csv')
test_data.to_csv('explore/test_data_for_explore.csv')
return train_data,train_score,test_data,test_score
elif mode == 'online':
        # Online mode: train on all labelled data and score the provided test file
data = _data_duplicate_process(data)
train_score = data.Value
train_data = data.drop(labels='Value', axis=1)
test_data = pd.read_csv('test_A_20180117.csv', index_col=0, header=0)
test_data = _data_duplicate_process(test_data)
assert train_data.shape[1] == test_data.shape[1]
assert train_data.shape[0] == len(train_score)
        # Preprocess the training and test data together, then split them again
full_data = pd.concat([train_data, test_data], axis=0)
full_data = data_preprocessing(full_data, DROP_THRESHOLD=DROP_THRESHOLD, redundancy_process=redundancy_process,
by_category=by_category, zero_equal_na=zero_equal_na, augment=augment,
remove_correlation=remove_correlation, check=check)
train_data = full_data.loc[train_data.index,:]
test_data = full_data.loc[test_data.index,:]
print train_data.shape,test_data.shape
if check:
train_data.to_csv('explore/train_data_for_explore.csv')
test_data.to_csv('explore/test_data_for_explore.csv')
return train_data,train_score,test_data
if __name__ == '__main__':
import cPickle
import os
try:
os.makedirs('explore')
except OSError:
pass
    # Global settings, so the online and offline runs use the same parameters
DROP_THRESHOLD = 0.95
REDUNDANCY_PROCESS = True
BY_CATEGORY = True
ZERO_EQUAL_NA = True
AUGMENT = False
REMOVE_CORRELATION = False
CHECK = True
    # Read in the training data and handle anomalies
data = pd.read_csv('train_20180117.csv', index_col=0, header=0)
'''
    # Offline evaluation
train_data, train_score, test_data, test_score = data_split(data, mode='offline', DROP_THRESHOLD=DROP_THRESHOLD,
redundancy_process=REDUNDANCY_PROCESS,
by_category=BY_CATEGORY, zero_equal_na=ZERO_EQUAL_NA,
augment=AUGMENT, remove_correlation=REMOVE_CORRELATION,
check=CHECK)
cPickle.dump((train_data, train_score, test_data, test_score), open('offline_data.pkl', 'w'))
'''
    # Online run
train_data, train_score, test_data = data_split(data, mode='online', DROP_THRESHOLD=DROP_THRESHOLD,
redundancy_process=REDUNDANCY_PROCESS,
by_category=BY_CATEGORY, zero_equal_na=ZERO_EQUAL_NA,
augment=AUGMENT, remove_correlation=REMOVE_CORRELATION,
check=CHECK)
    cPickle.dump((train_data, train_score, test_data), open('online_data.pkl', 'w'))
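    # A minimal downstream sketch (assumption, not part of the original pipeline): the pickled
    # tuple can be loaded back for modelling with, e.g.,
    #   train_data, train_score, test_data = cPickle.load(open('online_data.pkl'))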