Preface
Below, a support vector machine (SVM) is applied to the data and the results are reported.
Code
#Imports (preprocessing is used later to standardize the data)
from sklearn import preprocessing
from sklearn.svm import SVC
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import pandas as pd
#Read the data file
# readFileName="dataset\german-全标准化.xls"
readFileName="dataset\german.xls"
#Read the Excel file
df=pd.read_excel(readFileName)
# list_columns=list(df.columns[:-1])
x=df.iloc[:,:-1]   # all columns except the last are features
y=df.iloc[:,-1]    # the last column is the label
names=x.columns
#random_state acts as a random seed, making the split reproducible
# x_train,x_test,y_train,y_test=train_test_split(x,y,stratify=y,random_state=38)
x_train,x_test,y_train,y_test=train_test_split(x,y,stratify=y,train_size=0.6,random_state=38)
x_test2,x_check,y_test2,y_check=train_test_split(x_test,y_test,train_size=0.25,random_state=38)
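# With train_size=0.6 the first split keeps 60% of the rows for training and 40% for testing;
# the second split then takes 25% of that test portion as x_test2 (10% of all rows) and keeps
# the remaining 30% as x_check, which is used as the held-out evaluation set below.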
svm=SVC()
svm.fit(x_train,y_train)
print("accuracy on the training subset:{:.3f}".format(svm.score(x_train,y_train)))
print("accuracy on the test subset:{:.3f}".format(svm.score(x_check,y_check)))
'''
accuracy on the training subset:1.000
accuracy on the test subset:0.700
'''
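# On the unscaled features the default RBF-kernel SVC fits the training set perfectly (1.000)
# but reaches only 0.700 on the held-out data: the RBF distances are dominated by the
# large-magnitude features, which motivates the standardization below.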
#Check whether the features are already on comparable scales (optional plot)
# plt.tick_params(labelsize=8.5)
# plt.plot(names,x_train.min(axis=0),'o',label='Min')
# plt.plot(names,x_train.max(axis=0),'v',label='Max')
# plt.xlabel('Feature Index')
# plt.ylabel('Feature magnitude in log scale')
# plt.yscale('log')
# plt.xticks(rotation=90)
# plt.legend(loc='upper right')
#Standardize the data (zero mean, unit variance per feature).
#Note that the check set is scaled independently of the training set here; a variant that
#fits the scaler on the training set only is sketched after the result block below.
x_train_scaled = preprocessing.scale(x_train)
x_test_scaled = preprocessing.scale(x_check)
svm1=SVC()
svm1.fit(x_train_scaled,y_train)
print("accuracy on the scaled training subset:{:.3f}".format(svm1.score(x_train_scaled,y_train)))
print("accuracy on the scaled test subset:{:.3f}".format(svm1.score(x_test_scaled,y_check)))
'''
accuracy on the scaled training subset:0.867
accuracy on the scaled test subset:0.800
'''
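# A minimal alternative sketch (not part of the original post): fit a StandardScaler on the
# training set only and reuse the same mean/std for the check set, so that no check-set
# statistics enter the preprocessing. The names scaler, x_*_scaled2 and svm1b are illustrative.
# from sklearn.preprocessing import StandardScaler
# scaler = StandardScaler().fit(x_train)           # learn per-feature mean and std on the training data
# x_train_scaled2 = scaler.transform(x_train)      # standardize the training data
# x_check_scaled2 = scaler.transform(x_check)      # apply the same transform to the check set
# svm1b = SVC()
# svm1b.fit(x_train_scaled2, y_train)
# print("accuracy with a train-fitted scaler:{:.3f}".format(svm1b.score(x_check_scaled2, y_check)))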
# Tune the C parameter; kernel selects the kernel function used to transform the feature space;
# probability controls whether probability estimates are computed.
# C penalizes the slack variables: a smaller C weakens the penalty on misclassification,
# tolerates more errors, and tends to generalize better.
# gamma: kernel coefficient for the 'rbf', 'poly' and 'sigmoid' kernels. The default is 'auto'.
# kernel: the kernel function, 'rbf' by default; may be 'linear', 'poly', 'rbf', 'sigmoid' or 'precomputed'.
# probability: whether to enable probability estimates. This must be enabled before calling fit(),
# and it slows fit() down.
svm2=SVC(C=1,gamma="auto",kernel='rbf',probability=True)  # note: the print labels below say "c parameter=10", but this run sets C=1
svm2.fit(x_train_scaled,y_train)
print("after c parameter=10,accuracy on the scaled training subset:{:.3f}".format(svm2.score(x_train_scaled,y_train)))
print("after c parameter=10,accuracy on the scaled test subset:{:.3f}".format(svm2.score(x_test_scaled,y_check)))
'''
after c parameter=10,accuracy on the scaled training subset:0.972
after c parameter=10,accuracy on the scaled test subset:0.716
'''
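# A minimal sketch (not part of the original post) of searching C and gamma with cross-validation
# instead of trying a single setting by hand; the parameter grid below is illustrative.
# from sklearn.model_selection import GridSearchCV
# param_grid = {'C': [0.1, 1, 10, 100], 'gamma': [0.001, 0.01, 0.1, 1]}
# grid = GridSearchCV(SVC(kernel='rbf'), param_grid, cv=5)
# grid.fit(x_train_scaled, y_train)                # cross-validate every C/gamma combination on the training data
# print("best parameters:", grid.best_params_)
# print("best cross-validation accuracy:{:.3f}".format(grid.best_score_))
# print("check-set accuracy:{:.3f}".format(grid.score(x_test_scaled, y_check)))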
# plt.show()
#Functional distance from each sample to the separating hyperplane
#print (svm2.decision_function(x_train_scaled))
#print (svm2.decision_function(x_train_scaled)[:20]>0)
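# For a binary SVC the decision function is a signed distance: positive values are predicted as
# svm2.classes_[1], negative values as svm2.classes_[0]. An illustrative consistency check
# (not part of the original post):
# dec = svm2.decision_function(x_test_scaled)
# pred = svm2.predict(x_test_scaled)
# print(((dec > 0) == (pred == svm2.classes_[1])).all())   # expected to print True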
#Class labels of the SVM classifier
print(svm2.classes_)
# Print the class probabilities
# print(svm2.predict_proba(x_test_scaled))
print(len(svm2.predict_proba(x_test_scaled)))
# a = svm2.predict_proba(x_test_scaled)
# b = svm2.predict(x_test_scaled)
# c = []
# for i in range(len(a)):
#     if (a[i][1] > 0.5 and b[i] == 0):
#         print(a[i],b[i],i+1)
#Predict which class each sample belongs to, reported as 0 or 1
print(svm2.predict(x_test_scaled))
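# Because probability=True fits a separate Platt-scaling model on top of the SVM, predict_proba
# can occasionally disagree with predict (the commented loop above looks for such cases).
# A minimal sketch (not part of the original post) of classifying with an explicit probability
# threshold instead of relying on predict:
# import numpy as np
# proba_pos = svm2.predict_proba(x_test_scaled)[:, 1]                     # probability of svm2.classes_[1]
# custom_pred = np.where(proba_pos > 0.5, svm2.classes_[1], svm2.classes_[0])
# print(custom_pred)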
# plt.show()
Original article: https://www.cnblogs.com/LieDra/p/12018568.html