import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
%matplotlib inline
data = pd.read_csv("creditcard.csv")
data.head()
# Check whether the classes are balanced
count_classes = data['Class'].value_counts(sort=True).sort_index()
count_classes.plot(kind='bar')
plt.title("Fraud class histogram")
plt.xlabel("Class")
plt.ylabel("Frequency")
The classes are clearly imbalanced. There are two strategies to balance them: undersampling, which keeps only as many class-0 samples as there are class-1 samples, and oversampling, which generates class-1 samples until they match class 0 in number.
The Time column is not used, and the Amount column spans a wide range; a model might mistakenly treat large values as more important, so the column needs to be standardized or normalized:
from sklearn.preprocessing import StandardScaler

# StandardScaler expects a 2D array, so reshape the Amount column
data['normAmount'] = StandardScaler().fit_transform(data['Amount'].values.reshape(-1, 1))
data = data.drop(['Time', 'Amount'], axis=1)
data.head()
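The notebook standardizes Amount with StandardScaler (zero mean, unit variance). The normalization alternative mentioned above would instead map the column into [0, 1]. A minimal sketch, assuming scikit-learn's MinMaxScaler; this is illustrative only and is not used by the rest of the notebook:

from sklearn.preprocessing import MinMaxScaler
import pandas as pd

# Re-read only the Amount column to keep the sketch self-contained; in the
# notebook this would replace the StandardScaler line above, before 'Amount'
# is dropped.
amount = pd.read_csv("creditcard.csv")['Amount']
norm_amount = MinMaxScaler().fit_transform(amount.values.reshape(-1, 1))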
# Undersampling strategy
X = data.loc[:, data.columns != 'Class']
y = data.loc[:, data.columns == 'Class']

# Number of data points in the minority class
number_records_fraud = len(data[data.Class == 1])
fraud_indices = np.array(data[data.Class == 1].index)

# Picking the indices of the normal classes
normal_indices = data[data.Class == 0].index

# Out of the indices we picked, randomly select "x" number (number_records_fraud)
random_normal_indices = np.random.choice(normal_indices, number_records_fraud, replace=False)
random_normal_indices = np.array(random_normal_indices)

# Appending the 2 sets of indices
under_sample_indices = np.concatenate([fraud_indices, random_normal_indices])

# Under-sampled dataset
under_sample_data = data.iloc[under_sample_indices, :]
X_undersample = under_sample_data.loc[:, under_sample_data.columns != 'Class']
y_undersample = under_sample_data.loc[:, under_sample_data.columns == 'Class']

# Showing ratio
print("Percentage of normal transactions: ", len(under_sample_data[under_sample_data.Class == 0]) / len(under_sample_data))
print("Percentage of fraud transactions: ", len(under_sample_data[under_sample_data.Class == 1]) / len(under_sample_data))
print("Total number of transactions in resampled data: ", len(under_sample_data))
Percentage of normal transactions: 0.5
Percentage of fraud transactions: 0.5
Total number of transactions in resampled data: 984

# Cross-validation: train/test split
from sklearn.model_selection import train_test_split

# Whole dataset
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)

print("Number transactions train dataset: ", len(X_train))
print("Number transactions test dataset: ", len(X_test))
print("Total number of transactions: ", len(X_train) + len(X_test))

# Undersampled dataset
X_train_undersample, X_test_undersample, y_train_undersample, y_test_undersample = train_test_split(
    X_undersample, y_undersample, test_size=0.3, random_state=0)

print("")
print("Number transactions train dataset: ", len(X_train_undersample))
print("Number transactions test dataset: ", len(X_test_undersample))
print("Total number of transactions: ", len(X_train_undersample) + len(X_test_undersample))
Number transactions train dataset: 199364
Number transactions test dataset: 85443
Total number of transactions: 284807

Number transactions train dataset: 688
Number transactions test dataset: 296
Total number of transactions: 984
# Model evaluation method
# Recall = TP / (TP + FN)
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import KFold, cross_val_score
from sklearn.metrics import confusion_matrix, recall_score, classification_report
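To make the formula concrete, here is a small hand-worked check of recall against sklearn's recall_score, with made-up labels chosen only for illustration:

import numpy as np
from sklearn.metrics import recall_score

y_true = np.array([1, 1, 1, 1, 0, 0, 0, 0, 0, 0])  # 4 actual positives
y_pred = np.array([1, 1, 1, 0, 0, 0, 1, 0, 0, 0])  # TP = 3, FN = 1 (one positive missed)

# Recall = TP / (TP + FN) = 3 / (3 + 1) = 0.75
print(recall_score(y_true, y_pred))  # 0.75

Recall is the right metric here because missing a fraudulent transaction (a false negative) is far more costly than flagging a normal one.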
def printing_Kfold_scores(x_train_data, y_train_data):
    fold = KFold(5, shuffle=False)

    # Different C parameters (regularization penalty strengths)
    c_param_range = [0.01, 0.1, 1, 10, 100]

    results_table = pd.DataFrame(index=range(len(c_param_range)),
                                 columns=['C_parameter', 'Mean recall score'])
    results_table['C_parameter'] = c_param_range

    # The k-fold split gives 2 lists: train_indices = indices[0], test_indices = indices[1]
    j = 0
    for c_param in c_param_range:
        print('-------------------------------------------')
        print('C parameter: ', c_param)
        print('-------------------------------------------')
        print('')

        recall_accs = []
        for iteration, indices in enumerate(fold.split(x_train_data), start=1):
            # Call the logistic regression model with a certain C parameter;
            # the liblinear solver is required for the L1 penalty
            lr = LogisticRegression(C=c_param, penalty='l1', solver='liblinear')

            # Use the training portion of the fold (indices[0]) to fit the model,
            # then predict on the portion assigned as the 'test cross validation' (indices[1])
            lr.fit(x_train_data.iloc[indices[0], :], y_train_data.iloc[indices[0], :].values.ravel())

            # Predict values using the test indices in the training data
            y_pred_undersample = lr.predict(x_train_data.iloc[indices[1], :].values)

            # Calculate the recall score and append it to the list of recall scores for the current c_param
            recall_acc = recall_score(y_train_data.iloc[indices[1], :].values.ravel(), y_pred_undersample)
            recall_accs.append(recall_acc)
            print('Iteration ', iteration, ': recall score = ', recall_acc)

        # The mean value of those recall scores is the metric we want to save and get hold of
        results_table.loc[j, 'Mean recall score'] = np.mean(recall_accs)
        j += 1
        print('')
        print('Mean recall score ', np.mean(recall_accs))
        print('')

    best_c = results_table.loc[results_table['Mean recall score'].astype(float).idxmax()]['C_parameter']

    # Finally, we can check which C parameter is the best amongst the chosen
    print('*********************************************************************************')
    print('Best model to choose from cross validation is with C parameter = ', best_c)
    print('*********************************************************************************')

    return best_c
best_c = printing_Kfold_scores(X_train_undersample,y_train_undersample)
-------------------------------------------
C parameter:  0.01
-------------------------------------------

Iteration  1 : recall score =  0.958904109589
Iteration  2 : recall score =  0.917808219178
Iteration  3 : recall score =  1.0
Iteration  4 : recall score =  0.972972972973
Iteration  5 : recall score =  0.954545454545

Mean recall score  0.960846151257

-------------------------------------------
C parameter:  0.1
-------------------------------------------

Iteration  1 : recall score =  0.835616438356
Iteration  2 : recall score =  0.86301369863
Iteration  3 : recall score =  0.915254237288
Iteration  4 : recall score =  0.932432432432
Iteration  5 : recall score =  0.878787878788

Mean recall score  0.885020937099

-------------------------------------------
C parameter:  1
-------------------------------------------

Iteration  1 : recall score =  0.835616438356
Iteration  2 : recall score =  0.86301369863
Iteration  3 : recall score =  0.966101694915
Iteration  4 : recall score =  0.945945945946
Iteration  5 : recall score =  0.893939393939

Mean recall score  0.900923434357

-------------------------------------------
C parameter:  10
-------------------------------------------

Iteration  1 : recall score =  0.849315068493
Iteration  2 : recall score =  0.86301369863
Iteration  3 : recall score =  0.966101694915
Iteration  4 : recall score =  0.959459459459
Iteration  5 : recall score =  0.893939393939

Mean recall score  0.906365863087

-------------------------------------------
C parameter:  100
-------------------------------------------

Iteration  1 : recall score =  0.86301369863
Iteration  2 : recall score =  0.86301369863
Iteration  3 : recall score =  0.966101694915
Iteration  4 : recall score =  0.959459459459
Iteration  5 : recall score =  0.893939393939

Mean recall score  0.909105589115

*********************************************************************************
Best model to choose from cross validation is with C parameter =  0.01
*********************************************************************************
# Confusion matrix
def plot_confusion_matrix(cm, classes, title='Confusion matrix', cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    """
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=0)
    plt.yticks(tick_marks, classes)

    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, cm[i, j],
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")

    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
# Confusion matrix on the undersampled test set
import itertools

lr = LogisticRegression(C=best_c, penalty='l1', solver='liblinear')
lr.fit(X_train_undersample, y_train_undersample.values.ravel())
y_pred_undersample = lr.predict(X_test_undersample.values)

# Compute confusion matrix
cnf_matrix = confusion_matrix(y_test_undersample, y_pred_undersample)
np.set_printoptions(precision=2)

print("Recall metric in the testing dataset: ", cnf_matrix[1,1] / (cnf_matrix[1,0] + cnf_matrix[1,1]))

# Plot non-normalized confusion matrix
class_names = [0, 1]
plt.figure()
plot_confusion_matrix(cnf_matrix, classes=class_names, title='Confusion matrix')
plt.show()
# Confusion matrix on the full test set (model still trained on the undersampled data)
lr = LogisticRegression(C=best_c, penalty='l1', solver='liblinear')
lr.fit(X_train_undersample, y_train_undersample.values.ravel())
y_pred = lr.predict(X_test.values)

# Compute confusion matrix
cnf_matrix = confusion_matrix(y_test, y_pred)
np.set_printoptions(precision=2)

print("Recall metric in the testing dataset: ", cnf_matrix[1,1] / (cnf_matrix[1,0] + cnf_matrix[1,1]))

# Plot non-normalized confusion matrix
class_names = [0, 1]
plt.figure()
plot_confusion_matrix(cnf_matrix, classes=class_names, title='Confusion matrix')
plt.show()
best_c = printing_Kfold_scores(X_train,y_train)
-------------------------------------------
C parameter:  0.01
-------------------------------------------

Iteration  1 : recall score =  0.492537313433
Iteration  2 : recall score =  0.602739726027
Iteration  3 : recall score =  0.683333333333
Iteration  4 : recall score =  0.569230769231
Iteration  5 : recall score =  0.45

Mean recall score  0.559568228405

-------------------------------------------
C parameter:  0.1
-------------------------------------------

Iteration  1 : recall score =  0.567164179104
Iteration  2 : recall score =  0.616438356164
Iteration  3 : recall score =  0.683333333333
Iteration  4 : recall score =  0.584615384615
Iteration  5 : recall score =  0.525

Mean recall score  0.595310250644

-------------------------------------------
C parameter:  1
-------------------------------------------

Iteration  1 : recall score =  0.55223880597
Iteration  2 : recall score =  0.616438356164
Iteration  3 : recall score =  0.716666666667
Iteration  4 : recall score =  0.615384615385
Iteration  5 : recall score =  0.5625

Mean recall score  0.612645688837

-------------------------------------------
C parameter:  10
-------------------------------------------

Iteration  1 : recall score =  0.55223880597
Iteration  2 : recall score =  0.616438356164
Iteration  3 : recall score =  0.733333333333
Iteration  4 : recall score =  0.615384615385
Iteration  5 : recall score =  0.575

Mean recall score  0.61847902217

-------------------------------------------
C parameter:  100
-------------------------------------------

Iteration  1 : recall score =  0.55223880597
Iteration  2 : recall score =  0.616438356164
Iteration  3 : recall score =  0.733333333333
Iteration  4 : recall score =  0.615384615385
Iteration  5 : recall score =  0.575

Mean recall score  0.61847902217

*********************************************************************************
Best model to choose from cross validation is with C parameter =  10.0
*********************************************************************************
lr = LogisticRegression(C=best_c, penalty='l1', solver='liblinear')
lr.fit(X_train, y_train.values.ravel())
y_pred = lr.predict(X_test.values)

# Compute confusion matrix
cnf_matrix = confusion_matrix(y_test, y_pred)
np.set_printoptions(precision=2)

print("Recall metric in the testing dataset: ", cnf_matrix[1,1] / (cnf_matrix[1,0] + cnf_matrix[1,1]))

# Plot non-normalized confusion matrix
class_names = [0, 1]
plt.figure()
plot_confusion_matrix(cnf_matrix, classes=class_names, title='Confusion matrix')
plt.show()
# Effect of the logistic regression decision threshold on the results
lr = LogisticRegression(C=0.01, penalty='l1', solver='liblinear')
lr.fit(X_train_undersample, y_train_undersample.values.ravel())
y_pred_undersample_proba = lr.predict_proba(X_test_undersample.values)

thresholds = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]

plt.figure(figsize=(10, 10))

j = 1
for i in thresholds:
    y_test_predictions_high_recall = y_pred_undersample_proba[:, 1] > i

    plt.subplot(3, 3, j)
    j += 1

    # Compute confusion matrix
    cnf_matrix = confusion_matrix(y_test_undersample, y_test_predictions_high_recall)
    np.set_printoptions(precision=2)

    print("Recall metric in the testing dataset: ", cnf_matrix[1,1] / (cnf_matrix[1,0] + cnf_matrix[1,1]))

    # Plot non-normalized confusion matrix
    class_names = [0, 1]
    plot_confusion_matrix(cnf_matrix, classes=class_names, title='Threshold >= %s' % i)
Recall metric in the testing dataset:  1.0
Recall metric in the testing dataset:  1.0
Recall metric in the testing dataset:  1.0
Recall metric in the testing dataset:  0.986394557823
Recall metric in the testing dataset:  0.931972789116
Recall metric in the testing dataset:  0.884353741497
Recall metric in the testing dataset:  0.836734693878
Recall metric in the testing dataset:  0.748299319728
Recall metric in the testing dataset:  0.571428571429
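Recall alone hides the cost of a low threshold: at 0.1 nearly everything is flagged as fraud, so recall is perfect but precision collapses. A hedged sketch that reports precision alongside recall for the same thresholds, reusing y_pred_undersample_proba and y_test_undersample from the cell above:

from sklearn.metrics import precision_score

for t in thresholds:
    # Cast the boolean mask to 0/1 labels before scoring
    preds = (y_pred_undersample_proba[:, 1] > t).astype(int)
    print('threshold %.1f: recall = %.3f, precision = %.3f'
          % (t,
             recall_score(y_test_undersample.values.ravel(), preds),
             precision_score(y_test_undersample.values.ravel(), preds)))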
# Oversampling strategy: SMOTE (Synthetic Minority Over-sampling Technique)
import pandas as pd
from imblearn.over_sampling import SMOTE
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
credit_cards = pd.read_csv('creditcard.csv')
columns = credit_cards.columns

# The labels are in the last column ('Class'). Simply remove it to obtain the feature columns
features_columns = columns.delete(len(columns) - 1)

features = credit_cards[features_columns]
labels = credit_cards['Class']
features_train, features_test, labels_train, labels_test = train_test_split(features, labels, test_size=0.2, random_state=0)
oversampler = SMOTE(random_state=0)
os_features, os_labels = oversampler.fit_resample(features_train, labels_train)
len(os_labels[os_labels==1])
227454
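The minority class now matches the majority class in the training set. SMOTE does not simply duplicate minority rows; it synthesizes new points by interpolating between a minority sample and one of its k nearest minority-class neighbors. A minimal sketch of that interpolation step, with made-up points for illustration (not imblearn's actual implementation):

import numpy as np

rng = np.random.RandomState(0)
x_i = np.array([1.0, 2.0])    # a minority-class sample
x_nn = np.array([3.0, 2.5])   # one of its nearest minority-class neighbors
lam = rng.uniform(0, 1)       # random interpolation weight in [0, 1]

# Synthetic sample lies on the segment between x_i and x_nn
x_new = x_i + lam * (x_nn - x_i)
print(x_new)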
os_features = pd.DataFrame(os_features)
os_labels = pd.DataFrame(os_labels)
best_c = printing_Kfold_scores(os_features, os_labels)
-------------------------------------------
C parameter:  0.01
-------------------------------------------

Iteration  1 : recall score =  0.890322580645
Iteration  2 : recall score =  0.894736842105
Iteration  3 : recall score =  0.968861347792
Iteration  4 : recall score =  0.957595541926
Iteration  5 : recall score =  0.958430881173

Mean recall score  0.933989438728

-------------------------------------------
C parameter:  0.1
-------------------------------------------

Iteration  1 : recall score =  0.890322580645
Iteration  2 : recall score =  0.894736842105
Iteration  3 : recall score =  0.970410534469
Iteration  4 : recall score =  0.959980655302
Iteration  5 : recall score =  0.960178498807

Mean recall score  0.935125822266

-------------------------------------------
C parameter:  1
-------------------------------------------

Iteration  1 : recall score =  0.890322580645
Iteration  2 : recall score =  0.894736842105
Iteration  3 : recall score =  0.970454796946
Iteration  4 : recall score =  0.96014552489
Iteration  5 : recall score =  0.960596168431

Mean recall score  0.935251182603

-------------------------------------------
C parameter:  10
-------------------------------------------

Iteration  1 : recall score =  0.890322580645
Iteration  2 : recall score =  0.894736842105
Iteration  3 : recall score =  0.97065397809
Iteration  4 : recall score =  0.960343368396
Iteration  5 : recall score =  0.960530220596

Mean recall score  0.935317397966

-------------------------------------------
C parameter:  100
-------------------------------------------

Iteration  1 : recall score =  0.890322580645
Iteration  2 : recall score =  0.894736842105
Iteration  3 : recall score =  0.970543321899
Iteration  4 : recall score =  0.960211472725
Iteration  5 : recall score =  0.960903924995

Mean recall score  0.935343628474

*********************************************************************************
Best model to choose from cross validation is with C parameter =  100.0
*********************************************************************************
lr = LogisticRegression(C=best_c, penalty='l1', solver='liblinear')
lr.fit(os_features, os_labels.values.ravel())
y_pred = lr.predict(features_test.values)

# Compute confusion matrix
cnf_matrix = confusion_matrix(labels_test, y_pred)
np.set_printoptions(precision=2)

print("Recall metric in the testing dataset: ", cnf_matrix[1,1] / (cnf_matrix[1,0] + cnf_matrix[1,1]))

# Plot non-normalized confusion matrix
class_names = [0, 1]
plt.figure()
plot_confusion_matrix(cnf_matrix, classes=class_names, title='Confusion matrix')
plt.show()